The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
-- 
2.43.0

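As an aside, the cast-to-unsigned-long idiom used above can be checked with any hosted compiler; the following standalone sketch is illustrative only (the array and counter values are invented, not taken from the test):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        static uint8_t test_data[16];
        uint32_t test_read_count = 42;

        /* Cast instead of relying on the PRIxPTR/PRId32 macros from inttypes.h. */
        printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
        printf("Test data read: %lu\n", (unsigned long)test_read_count);
        return 0;
    }
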
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-- 
2.43.0

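The effect described above can be mimicked in plain C, purely as an illustration: once the vcpu index is a compile-time constant, the whole scoreboard-index computation folds away, which is what the TCG optimizer does after gen_cpu_index() starts returning a constant. The scoreboard layout and sizes below are invented for this sketch and are not the real plugin data structures:

    #include <stdio.h>

    /* Invented sizes; not the real plugin scoreboard layout. */
    #define NUM_VCPUS   1
    #define NUM_ENTRIES 8

    static unsigned long scoreboard[NUM_VCPUS][NUM_ENTRIES];

    /*
     * With a single vcpu the index is a constant, so the address of
     * scoreboard[cpu_index][i] reduces to "scoreboard + i" at compile
     * time -- no load, no multiply on every access.
     */
    enum { cpu_index = 0 };

    int main(void)
    {
        for (int i = 0; i < NUM_ENTRIES; i++) {
            scoreboard[cpu_index][i] += i;
        }
        printf("%lu\n", scoreboard[cpu_index][3]);
        return 0;
    }
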
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
     }
     op->opc = INDEX_op_br;
     op->args[0] = label;
-    break;
+    finish_ebb(ctx);
+    return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
-- 
2.43.0

There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
-- 
2.43.0

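To see the "affected mask" rule outside of the optimizer, here is a small self-contained sketch (the mask values are invented, purely illustrative): for an AND whose second operand is constant, a_mask = z1 & ~z2 has a 1 bit only where the AND could actually clear a possibly-set bit of the first input, so an all-zero a_mask means the operation is just a copy:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* z_mask of arg1: a 1 bit means "this bit may be nonzero". */
        uint64_t z1 = 0x00ff;
        /* arg2 is constant, so its z_mask is simply its value. */
        uint64_t z2 = 0x0fff;

        /* Bits of arg1 that the AND could actually clear. */
        uint64_t a_mask = z1 & ~z2;

        if (a_mask == 0) {
            /* No possibly-set bit of arg1 can be cleared: and -> mov arg1. */
            printf("fold to a copy of arg1\n");
        } else {
            printf("affected bits: 0x%llx\n", (unsigned long long)a_mask);
        }
        return 0;
    }
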
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
-- 
2.43.0

Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
-- 
2.43.0

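The "fold the output to constant zero" case mentioned in the new comment can be illustrated with a tiny standalone program (mask values invented for the sketch): the known-nonzero mask of an AND result is the intersection of the inputs' masks, and when it is zero the optimizer can replace the op with a move of constant 0:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* z_masks of two AND inputs; a 1 bit means "may be nonzero". */
        uint64_t z1 = 0xff000000;   /* only bits 24..31 may be set */
        uint64_t z2 = 0x000000ff;   /* only bits 0..7 may be set   */

        /* The AND result can only be set where both inputs may be set. */
        uint64_t z_mask = z1 & z2;

        if (z_mask == 0) {
            /* fold_masks_zs() would rewrite the op as "movi dest, 0". */
            printf("result is provably zero\n");
        } else {
            printf("z_mask = 0x%llx\n", (unsigned long long)z_mask);
        }
        return 0;
    }
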
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
-- 
2.43.0

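A quick worked example of the augmentation s_mask | smask_from_zmask(z_mask), as a standalone sketch: the helper below reproduces the existing smask_from_zmask() formula (the same code that appears among the lines removed in the following patch), using a GCC/Clang __builtin_clzll as a stand-in for QEMU's clz64; the sample z_mask is invented:

    #include <stdio.h>
    #include <stdint.h>

    /* High bits known zero imply that many sign-bit repetitions, minus one. */
    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = zmask ? __builtin_clzll(zmask) : 64;
        if (rep == 0) {
            return 0;
        }
        rep -= 1;
        return ~(~0ull >> rep);
    }

    int main(void)
    {
        /* A value known to fit in 8 bits: z_mask = 0xff. */
        uint64_t z_mask = 0xff;
        uint64_t s_mask = 0;            /* no sign info from the op itself */

        s_mask |= smask_from_zmask(z_mask);
        /* Prints 0xfffffffffffffe00: bits 63..9 are known sign copies. */
        printf("s_mask = 0x%llx\n", (unsigned long long)s_mask);
        return 0;
    }
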
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
led to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, and
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right.  Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
      */
     if (i == 0) {
         ts_info(ts)->z_mask = ctx->z_mask;
-        ts_info(ts)->s_mask = ctx->s_mask;
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
163
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
166
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
168
- ctx->s_mask = smask_from_smask(s_mask);
169
170
return fold_masks(ctx, op);
171
}
172
--
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Add opcodes and encoder functions for LSX.
4
5
Generated from
6
https://github.com/jiegec/loongarch-opcodes/tree/qemu-lsx.
7
8
Signed-off-by: Jiajie Chen <c@jia.je>
9
Acked-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20230908022302.180442-2-c@jia.je>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
3
---
13
tcg/loongarch64/tcg-insn-defs.c.inc | 6019 ++++++++++++++++++++++++++-
4
tcg/optimize.c | 9 +++++----
14
1 file changed, 6018 insertions(+), 1 deletion(-)
5
1 file changed, 5 insertions(+), 4 deletions(-)
15
6
16
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
9
--- a/tcg/optimize.c
19
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
10
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
21
*
12
remove_mem_copy_all(ctx);
22
* This file is auto-generated by genqemutcgdefs from
23
* https://github.com/loongson-community/loongarch-opcodes,
24
- * from commit 25ca7effe9d88101c1cf96c4005423643386d81f.
25
+ * from commit 8027da9a8157a8b47fc48ff1def292e09c5668bd.
26
* DO NOT EDIT.
27
*/
28
29
@@ -XXX,XX +XXX,XX @@ typedef enum {
30
OPC_ANDI = 0x03400000,
31
OPC_ORI = 0x03800000,
32
OPC_XORI = 0x03c00000,
33
+ OPC_VFMADD_S = 0x09100000,
34
+ OPC_VFMADD_D = 0x09200000,
35
+ OPC_VFMSUB_S = 0x09500000,
36
+ OPC_VFMSUB_D = 0x09600000,
37
+ OPC_VFNMADD_S = 0x09900000,
38
+ OPC_VFNMADD_D = 0x09a00000,
39
+ OPC_VFNMSUB_S = 0x09d00000,
40
+ OPC_VFNMSUB_D = 0x09e00000,
41
+ OPC_VFCMP_CAF_S = 0x0c500000,
42
+ OPC_VFCMP_SAF_S = 0x0c508000,
43
+ OPC_VFCMP_CLT_S = 0x0c510000,
44
+ OPC_VFCMP_SLT_S = 0x0c518000,
45
+ OPC_VFCMP_CEQ_S = 0x0c520000,
46
+ OPC_VFCMP_SEQ_S = 0x0c528000,
47
+ OPC_VFCMP_CLE_S = 0x0c530000,
48
+ OPC_VFCMP_SLE_S = 0x0c538000,
49
+ OPC_VFCMP_CUN_S = 0x0c540000,
50
+ OPC_VFCMP_SUN_S = 0x0c548000,
51
+ OPC_VFCMP_CULT_S = 0x0c550000,
52
+ OPC_VFCMP_SULT_S = 0x0c558000,
53
+ OPC_VFCMP_CUEQ_S = 0x0c560000,
54
+ OPC_VFCMP_SUEQ_S = 0x0c568000,
55
+ OPC_VFCMP_CULE_S = 0x0c570000,
56
+ OPC_VFCMP_SULE_S = 0x0c578000,
57
+ OPC_VFCMP_CNE_S = 0x0c580000,
58
+ OPC_VFCMP_SNE_S = 0x0c588000,
59
+ OPC_VFCMP_COR_S = 0x0c5a0000,
60
+ OPC_VFCMP_SOR_S = 0x0c5a8000,
61
+ OPC_VFCMP_CUNE_S = 0x0c5c0000,
62
+ OPC_VFCMP_SUNE_S = 0x0c5c8000,
63
+ OPC_VFCMP_CAF_D = 0x0c600000,
64
+ OPC_VFCMP_SAF_D = 0x0c608000,
65
+ OPC_VFCMP_CLT_D = 0x0c610000,
66
+ OPC_VFCMP_SLT_D = 0x0c618000,
67
+ OPC_VFCMP_CEQ_D = 0x0c620000,
68
+ OPC_VFCMP_SEQ_D = 0x0c628000,
69
+ OPC_VFCMP_CLE_D = 0x0c630000,
70
+ OPC_VFCMP_SLE_D = 0x0c638000,
71
+ OPC_VFCMP_CUN_D = 0x0c640000,
72
+ OPC_VFCMP_SUN_D = 0x0c648000,
73
+ OPC_VFCMP_CULT_D = 0x0c650000,
74
+ OPC_VFCMP_SULT_D = 0x0c658000,
75
+ OPC_VFCMP_CUEQ_D = 0x0c660000,
76
+ OPC_VFCMP_SUEQ_D = 0x0c668000,
77
+ OPC_VFCMP_CULE_D = 0x0c670000,
78
+ OPC_VFCMP_SULE_D = 0x0c678000,
79
+ OPC_VFCMP_CNE_D = 0x0c680000,
80
+ OPC_VFCMP_SNE_D = 0x0c688000,
81
+ OPC_VFCMP_COR_D = 0x0c6a0000,
82
+ OPC_VFCMP_SOR_D = 0x0c6a8000,
83
+ OPC_VFCMP_CUNE_D = 0x0c6c0000,
84
+ OPC_VFCMP_SUNE_D = 0x0c6c8000,
85
+ OPC_VBITSEL_V = 0x0d100000,
86
+ OPC_VSHUF_B = 0x0d500000,
87
OPC_ADDU16I_D = 0x10000000,
88
OPC_LU12I_W = 0x14000000,
89
OPC_CU32I_D = 0x16000000,
90
@@ -XXX,XX +XXX,XX @@ typedef enum {
91
OPC_LD_BU = 0x2a000000,
92
OPC_LD_HU = 0x2a400000,
93
OPC_LD_WU = 0x2a800000,
94
+ OPC_VLD = 0x2c000000,
95
+ OPC_VST = 0x2c400000,
96
+ OPC_VLDREPL_D = 0x30100000,
97
+ OPC_VLDREPL_W = 0x30200000,
98
+ OPC_VLDREPL_H = 0x30400000,
99
+ OPC_VLDREPL_B = 0x30800000,
100
+ OPC_VSTELM_D = 0x31100000,
101
+ OPC_VSTELM_W = 0x31200000,
102
+ OPC_VSTELM_H = 0x31400000,
103
+ OPC_VSTELM_B = 0x31800000,
104
OPC_LDX_B = 0x38000000,
105
OPC_LDX_H = 0x38040000,
106
OPC_LDX_W = 0x38080000,
107
@@ -XXX,XX +XXX,XX @@ typedef enum {
108
OPC_LDX_BU = 0x38200000,
109
OPC_LDX_HU = 0x38240000,
110
OPC_LDX_WU = 0x38280000,
111
+ OPC_VLDX = 0x38400000,
112
+ OPC_VSTX = 0x38440000,
113
OPC_DBAR = 0x38720000,
114
OPC_JIRL = 0x4c000000,
115
OPC_B = 0x50000000,
116
@@ -XXX,XX +XXX,XX @@ typedef enum {
117
OPC_BLE = 0x64000000,
118
OPC_BGTU = 0x68000000,
119
OPC_BLEU = 0x6c000000,
120
+ OPC_VSEQ_B = 0x70000000,
121
+ OPC_VSEQ_H = 0x70008000,
122
+ OPC_VSEQ_W = 0x70010000,
123
+ OPC_VSEQ_D = 0x70018000,
124
+ OPC_VSLE_B = 0x70020000,
125
+ OPC_VSLE_H = 0x70028000,
126
+ OPC_VSLE_W = 0x70030000,
127
+ OPC_VSLE_D = 0x70038000,
128
+ OPC_VSLE_BU = 0x70040000,
129
+ OPC_VSLE_HU = 0x70048000,
130
+ OPC_VSLE_WU = 0x70050000,
131
+ OPC_VSLE_DU = 0x70058000,
132
+ OPC_VSLT_B = 0x70060000,
133
+ OPC_VSLT_H = 0x70068000,
134
+ OPC_VSLT_W = 0x70070000,
135
+ OPC_VSLT_D = 0x70078000,
136
+ OPC_VSLT_BU = 0x70080000,
137
+ OPC_VSLT_HU = 0x70088000,
138
+ OPC_VSLT_WU = 0x70090000,
139
+ OPC_VSLT_DU = 0x70098000,
140
+ OPC_VADD_B = 0x700a0000,
141
+ OPC_VADD_H = 0x700a8000,
142
+ OPC_VADD_W = 0x700b0000,
143
+ OPC_VADD_D = 0x700b8000,
144
+ OPC_VSUB_B = 0x700c0000,
145
+ OPC_VSUB_H = 0x700c8000,
146
+ OPC_VSUB_W = 0x700d0000,
147
+ OPC_VSUB_D = 0x700d8000,
148
+ OPC_VADDWEV_H_B = 0x701e0000,
149
+ OPC_VADDWEV_W_H = 0x701e8000,
150
+ OPC_VADDWEV_D_W = 0x701f0000,
151
+ OPC_VADDWEV_Q_D = 0x701f8000,
152
+ OPC_VSUBWEV_H_B = 0x70200000,
153
+ OPC_VSUBWEV_W_H = 0x70208000,
154
+ OPC_VSUBWEV_D_W = 0x70210000,
155
+ OPC_VSUBWEV_Q_D = 0x70218000,
156
+ OPC_VADDWOD_H_B = 0x70220000,
157
+ OPC_VADDWOD_W_H = 0x70228000,
158
+ OPC_VADDWOD_D_W = 0x70230000,
159
+ OPC_VADDWOD_Q_D = 0x70238000,
160
+ OPC_VSUBWOD_H_B = 0x70240000,
161
+ OPC_VSUBWOD_W_H = 0x70248000,
162
+ OPC_VSUBWOD_D_W = 0x70250000,
163
+ OPC_VSUBWOD_Q_D = 0x70258000,
164
+ OPC_VADDWEV_H_BU = 0x702e0000,
165
+ OPC_VADDWEV_W_HU = 0x702e8000,
166
+ OPC_VADDWEV_D_WU = 0x702f0000,
167
+ OPC_VADDWEV_Q_DU = 0x702f8000,
168
+ OPC_VSUBWEV_H_BU = 0x70300000,
169
+ OPC_VSUBWEV_W_HU = 0x70308000,
170
+ OPC_VSUBWEV_D_WU = 0x70310000,
171
+ OPC_VSUBWEV_Q_DU = 0x70318000,
172
+ OPC_VADDWOD_H_BU = 0x70320000,
173
+ OPC_VADDWOD_W_HU = 0x70328000,
174
+ OPC_VADDWOD_D_WU = 0x70330000,
175
+ OPC_VADDWOD_Q_DU = 0x70338000,
176
+ OPC_VSUBWOD_H_BU = 0x70340000,
177
+ OPC_VSUBWOD_W_HU = 0x70348000,
178
+ OPC_VSUBWOD_D_WU = 0x70350000,
179
+ OPC_VSUBWOD_Q_DU = 0x70358000,
180
+ OPC_VADDWEV_H_BU_B = 0x703e0000,
181
+ OPC_VADDWEV_W_HU_H = 0x703e8000,
182
+ OPC_VADDWEV_D_WU_W = 0x703f0000,
183
+ OPC_VADDWEV_Q_DU_D = 0x703f8000,
184
+ OPC_VADDWOD_H_BU_B = 0x70400000,
185
+ OPC_VADDWOD_W_HU_H = 0x70408000,
186
+ OPC_VADDWOD_D_WU_W = 0x70410000,
187
+ OPC_VADDWOD_Q_DU_D = 0x70418000,
188
+ OPC_VSADD_B = 0x70460000,
189
+ OPC_VSADD_H = 0x70468000,
190
+ OPC_VSADD_W = 0x70470000,
191
+ OPC_VSADD_D = 0x70478000,
192
+ OPC_VSSUB_B = 0x70480000,
193
+ OPC_VSSUB_H = 0x70488000,
194
+ OPC_VSSUB_W = 0x70490000,
195
+ OPC_VSSUB_D = 0x70498000,
196
+ OPC_VSADD_BU = 0x704a0000,
197
+ OPC_VSADD_HU = 0x704a8000,
198
+ OPC_VSADD_WU = 0x704b0000,
199
+ OPC_VSADD_DU = 0x704b8000,
200
+ OPC_VSSUB_BU = 0x704c0000,
201
+ OPC_VSSUB_HU = 0x704c8000,
202
+ OPC_VSSUB_WU = 0x704d0000,
203
+ OPC_VSSUB_DU = 0x704d8000,
204
+ OPC_VHADDW_H_B = 0x70540000,
205
+ OPC_VHADDW_W_H = 0x70548000,
206
+ OPC_VHADDW_D_W = 0x70550000,
207
+ OPC_VHADDW_Q_D = 0x70558000,
208
+ OPC_VHSUBW_H_B = 0x70560000,
209
+ OPC_VHSUBW_W_H = 0x70568000,
210
+ OPC_VHSUBW_D_W = 0x70570000,
211
+ OPC_VHSUBW_Q_D = 0x70578000,
212
+ OPC_VHADDW_HU_BU = 0x70580000,
213
+ OPC_VHADDW_WU_HU = 0x70588000,
214
+ OPC_VHADDW_DU_WU = 0x70590000,
215
+ OPC_VHADDW_QU_DU = 0x70598000,
216
+ OPC_VHSUBW_HU_BU = 0x705a0000,
217
+ OPC_VHSUBW_WU_HU = 0x705a8000,
218
+ OPC_VHSUBW_DU_WU = 0x705b0000,
219
+ OPC_VHSUBW_QU_DU = 0x705b8000,
220
+ OPC_VADDA_B = 0x705c0000,
221
+ OPC_VADDA_H = 0x705c8000,
222
+ OPC_VADDA_W = 0x705d0000,
223
+ OPC_VADDA_D = 0x705d8000,
224
+ OPC_VABSD_B = 0x70600000,
225
+ OPC_VABSD_H = 0x70608000,
226
+ OPC_VABSD_W = 0x70610000,
227
+ OPC_VABSD_D = 0x70618000,
228
+ OPC_VABSD_BU = 0x70620000,
229
+ OPC_VABSD_HU = 0x70628000,
230
+ OPC_VABSD_WU = 0x70630000,
231
+ OPC_VABSD_DU = 0x70638000,
232
+ OPC_VAVG_B = 0x70640000,
233
+ OPC_VAVG_H = 0x70648000,
234
+ OPC_VAVG_W = 0x70650000,
235
+ OPC_VAVG_D = 0x70658000,
236
+ OPC_VAVG_BU = 0x70660000,
237
+ OPC_VAVG_HU = 0x70668000,
238
+ OPC_VAVG_WU = 0x70670000,
239
+ OPC_VAVG_DU = 0x70678000,
240
+ OPC_VAVGR_B = 0x70680000,
241
+ OPC_VAVGR_H = 0x70688000,
242
+ OPC_VAVGR_W = 0x70690000,
243
+ OPC_VAVGR_D = 0x70698000,
244
+ OPC_VAVGR_BU = 0x706a0000,
245
+ OPC_VAVGR_HU = 0x706a8000,
246
+ OPC_VAVGR_WU = 0x706b0000,
247
+ OPC_VAVGR_DU = 0x706b8000,
248
+ OPC_VMAX_B = 0x70700000,
249
+ OPC_VMAX_H = 0x70708000,
250
+ OPC_VMAX_W = 0x70710000,
251
+ OPC_VMAX_D = 0x70718000,
252
+ OPC_VMIN_B = 0x70720000,
253
+ OPC_VMIN_H = 0x70728000,
254
+ OPC_VMIN_W = 0x70730000,
255
+ OPC_VMIN_D = 0x70738000,
256
+ OPC_VMAX_BU = 0x70740000,
257
+ OPC_VMAX_HU = 0x70748000,
258
+ OPC_VMAX_WU = 0x70750000,
259
+ OPC_VMAX_DU = 0x70758000,
260
+ OPC_VMIN_BU = 0x70760000,
261
+ OPC_VMIN_HU = 0x70768000,
262
+ OPC_VMIN_WU = 0x70770000,
263
+ OPC_VMIN_DU = 0x70778000,
264
+ OPC_VMUL_B = 0x70840000,
265
+ OPC_VMUL_H = 0x70848000,
266
+ OPC_VMUL_W = 0x70850000,
267
+ OPC_VMUL_D = 0x70858000,
268
+ OPC_VMUH_B = 0x70860000,
269
+ OPC_VMUH_H = 0x70868000,
270
+ OPC_VMUH_W = 0x70870000,
271
+ OPC_VMUH_D = 0x70878000,
272
+ OPC_VMUH_BU = 0x70880000,
273
+ OPC_VMUH_HU = 0x70888000,
274
+ OPC_VMUH_WU = 0x70890000,
275
+ OPC_VMUH_DU = 0x70898000,
276
+ OPC_VMULWEV_H_B = 0x70900000,
277
+ OPC_VMULWEV_W_H = 0x70908000,
278
+ OPC_VMULWEV_D_W = 0x70910000,
279
+ OPC_VMULWEV_Q_D = 0x70918000,
280
+ OPC_VMULWOD_H_B = 0x70920000,
281
+ OPC_VMULWOD_W_H = 0x70928000,
282
+ OPC_VMULWOD_D_W = 0x70930000,
283
+ OPC_VMULWOD_Q_D = 0x70938000,
284
+ OPC_VMULWEV_H_BU = 0x70980000,
285
+ OPC_VMULWEV_W_HU = 0x70988000,
286
+ OPC_VMULWEV_D_WU = 0x70990000,
287
+ OPC_VMULWEV_Q_DU = 0x70998000,
288
+ OPC_VMULWOD_H_BU = 0x709a0000,
289
+ OPC_VMULWOD_W_HU = 0x709a8000,
290
+ OPC_VMULWOD_D_WU = 0x709b0000,
291
+ OPC_VMULWOD_Q_DU = 0x709b8000,
292
+ OPC_VMULWEV_H_BU_B = 0x70a00000,
293
+ OPC_VMULWEV_W_HU_H = 0x70a08000,
294
+ OPC_VMULWEV_D_WU_W = 0x70a10000,
295
+ OPC_VMULWEV_Q_DU_D = 0x70a18000,
296
+ OPC_VMULWOD_H_BU_B = 0x70a20000,
297
+ OPC_VMULWOD_W_HU_H = 0x70a28000,
298
+ OPC_VMULWOD_D_WU_W = 0x70a30000,
299
+ OPC_VMULWOD_Q_DU_D = 0x70a38000,
300
+ OPC_VMADD_B = 0x70a80000,
301
+ OPC_VMADD_H = 0x70a88000,
302
+ OPC_VMADD_W = 0x70a90000,
303
+ OPC_VMADD_D = 0x70a98000,
304
+ OPC_VMSUB_B = 0x70aa0000,
305
+ OPC_VMSUB_H = 0x70aa8000,
306
+ OPC_VMSUB_W = 0x70ab0000,
307
+ OPC_VMSUB_D = 0x70ab8000,
308
+ OPC_VMADDWEV_H_B = 0x70ac0000,
309
+ OPC_VMADDWEV_W_H = 0x70ac8000,
310
+ OPC_VMADDWEV_D_W = 0x70ad0000,
311
+ OPC_VMADDWEV_Q_D = 0x70ad8000,
312
+ OPC_VMADDWOD_H_B = 0x70ae0000,
313
+ OPC_VMADDWOD_W_H = 0x70ae8000,
314
+ OPC_VMADDWOD_D_W = 0x70af0000,
315
+ OPC_VMADDWOD_Q_D = 0x70af8000,
316
+ OPC_VMADDWEV_H_BU = 0x70b40000,
317
+ OPC_VMADDWEV_W_HU = 0x70b48000,
318
+ OPC_VMADDWEV_D_WU = 0x70b50000,
319
+ OPC_VMADDWEV_Q_DU = 0x70b58000,
320
+ OPC_VMADDWOD_H_BU = 0x70b60000,
321
+ OPC_VMADDWOD_W_HU = 0x70b68000,
322
+ OPC_VMADDWOD_D_WU = 0x70b70000,
323
+ OPC_VMADDWOD_Q_DU = 0x70b78000,
324
+ OPC_VMADDWEV_H_BU_B = 0x70bc0000,
325
+ OPC_VMADDWEV_W_HU_H = 0x70bc8000,
326
+ OPC_VMADDWEV_D_WU_W = 0x70bd0000,
327
+ OPC_VMADDWEV_Q_DU_D = 0x70bd8000,
328
+ OPC_VMADDWOD_H_BU_B = 0x70be0000,
329
+ OPC_VMADDWOD_W_HU_H = 0x70be8000,
330
+ OPC_VMADDWOD_D_WU_W = 0x70bf0000,
331
+ OPC_VMADDWOD_Q_DU_D = 0x70bf8000,
332
+ OPC_VDIV_B = 0x70e00000,
333
+ OPC_VDIV_H = 0x70e08000,
334
+ OPC_VDIV_W = 0x70e10000,
335
+ OPC_VDIV_D = 0x70e18000,
336
+ OPC_VMOD_B = 0x70e20000,
337
+ OPC_VMOD_H = 0x70e28000,
338
+ OPC_VMOD_W = 0x70e30000,
339
+ OPC_VMOD_D = 0x70e38000,
340
+ OPC_VDIV_BU = 0x70e40000,
341
+ OPC_VDIV_HU = 0x70e48000,
342
+ OPC_VDIV_WU = 0x70e50000,
343
+ OPC_VDIV_DU = 0x70e58000,
344
+ OPC_VMOD_BU = 0x70e60000,
345
+ OPC_VMOD_HU = 0x70e68000,
346
+ OPC_VMOD_WU = 0x70e70000,
347
+ OPC_VMOD_DU = 0x70e78000,
348
+ OPC_VSLL_B = 0x70e80000,
349
+ OPC_VSLL_H = 0x70e88000,
350
+ OPC_VSLL_W = 0x70e90000,
351
+ OPC_VSLL_D = 0x70e98000,
352
+ OPC_VSRL_B = 0x70ea0000,
353
+ OPC_VSRL_H = 0x70ea8000,
354
+ OPC_VSRL_W = 0x70eb0000,
355
+ OPC_VSRL_D = 0x70eb8000,
356
+ OPC_VSRA_B = 0x70ec0000,
357
+ OPC_VSRA_H = 0x70ec8000,
358
+ OPC_VSRA_W = 0x70ed0000,
359
+ OPC_VSRA_D = 0x70ed8000,
360
+ OPC_VROTR_B = 0x70ee0000,
361
+ OPC_VROTR_H = 0x70ee8000,
362
+ OPC_VROTR_W = 0x70ef0000,
363
+ OPC_VROTR_D = 0x70ef8000,
364
+ OPC_VSRLR_B = 0x70f00000,
365
+ OPC_VSRLR_H = 0x70f08000,
366
+ OPC_VSRLR_W = 0x70f10000,
367
+ OPC_VSRLR_D = 0x70f18000,
368
+ OPC_VSRAR_B = 0x70f20000,
369
+ OPC_VSRAR_H = 0x70f28000,
370
+ OPC_VSRAR_W = 0x70f30000,
371
+ OPC_VSRAR_D = 0x70f38000,
372
+ OPC_VSRLN_B_H = 0x70f48000,
373
+ OPC_VSRLN_H_W = 0x70f50000,
374
+ OPC_VSRLN_W_D = 0x70f58000,
375
+ OPC_VSRAN_B_H = 0x70f68000,
376
+ OPC_VSRAN_H_W = 0x70f70000,
377
+ OPC_VSRAN_W_D = 0x70f78000,
378
+ OPC_VSRLRN_B_H = 0x70f88000,
379
+ OPC_VSRLRN_H_W = 0x70f90000,
380
+ OPC_VSRLRN_W_D = 0x70f98000,
381
+ OPC_VSRARN_B_H = 0x70fa8000,
382
+ OPC_VSRARN_H_W = 0x70fb0000,
383
+ OPC_VSRARN_W_D = 0x70fb8000,
384
+ OPC_VSSRLN_B_H = 0x70fc8000,
385
+ OPC_VSSRLN_H_W = 0x70fd0000,
386
+ OPC_VSSRLN_W_D = 0x70fd8000,
387
+ OPC_VSSRAN_B_H = 0x70fe8000,
388
+ OPC_VSSRAN_H_W = 0x70ff0000,
389
+ OPC_VSSRAN_W_D = 0x70ff8000,
390
+ OPC_VSSRLRN_B_H = 0x71008000,
391
+ OPC_VSSRLRN_H_W = 0x71010000,
392
+ OPC_VSSRLRN_W_D = 0x71018000,
393
+ OPC_VSSRARN_B_H = 0x71028000,
394
+ OPC_VSSRARN_H_W = 0x71030000,
395
+ OPC_VSSRARN_W_D = 0x71038000,
396
+ OPC_VSSRLN_BU_H = 0x71048000,
397
+ OPC_VSSRLN_HU_W = 0x71050000,
398
+ OPC_VSSRLN_WU_D = 0x71058000,
399
+ OPC_VSSRAN_BU_H = 0x71068000,
400
+ OPC_VSSRAN_HU_W = 0x71070000,
401
+ OPC_VSSRAN_WU_D = 0x71078000,
402
+ OPC_VSSRLRN_BU_H = 0x71088000,
403
+ OPC_VSSRLRN_HU_W = 0x71090000,
404
+ OPC_VSSRLRN_WU_D = 0x71098000,
405
+ OPC_VSSRARN_BU_H = 0x710a8000,
406
+ OPC_VSSRARN_HU_W = 0x710b0000,
407
+ OPC_VSSRARN_WU_D = 0x710b8000,
408
+ OPC_VBITCLR_B = 0x710c0000,
409
+ OPC_VBITCLR_H = 0x710c8000,
410
+ OPC_VBITCLR_W = 0x710d0000,
411
+ OPC_VBITCLR_D = 0x710d8000,
412
+ OPC_VBITSET_B = 0x710e0000,
413
+ OPC_VBITSET_H = 0x710e8000,
414
+ OPC_VBITSET_W = 0x710f0000,
415
+ OPC_VBITSET_D = 0x710f8000,
416
+ OPC_VBITREV_B = 0x71100000,
417
+ OPC_VBITREV_H = 0x71108000,
418
+ OPC_VBITREV_W = 0x71110000,
419
+ OPC_VBITREV_D = 0x71118000,
420
+ OPC_VPACKEV_B = 0x71160000,
421
+ OPC_VPACKEV_H = 0x71168000,
422
+ OPC_VPACKEV_W = 0x71170000,
423
+ OPC_VPACKEV_D = 0x71178000,
424
+ OPC_VPACKOD_B = 0x71180000,
425
+ OPC_VPACKOD_H = 0x71188000,
426
+ OPC_VPACKOD_W = 0x71190000,
427
+ OPC_VPACKOD_D = 0x71198000,
428
+ OPC_VILVL_B = 0x711a0000,
429
+ OPC_VILVL_H = 0x711a8000,
430
+ OPC_VILVL_W = 0x711b0000,
431
+ OPC_VILVL_D = 0x711b8000,
432
+ OPC_VILVH_B = 0x711c0000,
433
+ OPC_VILVH_H = 0x711c8000,
434
+ OPC_VILVH_W = 0x711d0000,
435
+ OPC_VILVH_D = 0x711d8000,
436
+ OPC_VPICKEV_B = 0x711e0000,
437
+ OPC_VPICKEV_H = 0x711e8000,
438
+ OPC_VPICKEV_W = 0x711f0000,
439
+ OPC_VPICKEV_D = 0x711f8000,
440
+ OPC_VPICKOD_B = 0x71200000,
441
+ OPC_VPICKOD_H = 0x71208000,
442
+ OPC_VPICKOD_W = 0x71210000,
443
+ OPC_VPICKOD_D = 0x71218000,
444
+ OPC_VREPLVE_B = 0x71220000,
445
+ OPC_VREPLVE_H = 0x71228000,
446
+ OPC_VREPLVE_W = 0x71230000,
447
+ OPC_VREPLVE_D = 0x71238000,
448
+ OPC_VAND_V = 0x71260000,
449
+ OPC_VOR_V = 0x71268000,
450
+ OPC_VXOR_V = 0x71270000,
451
+ OPC_VNOR_V = 0x71278000,
452
+ OPC_VANDN_V = 0x71280000,
453
+ OPC_VORN_V = 0x71288000,
454
+ OPC_VFRSTP_B = 0x712b0000,
455
+ OPC_VFRSTP_H = 0x712b8000,
456
+ OPC_VADD_Q = 0x712d0000,
457
+ OPC_VSUB_Q = 0x712d8000,
458
+ OPC_VSIGNCOV_B = 0x712e0000,
459
+ OPC_VSIGNCOV_H = 0x712e8000,
460
+ OPC_VSIGNCOV_W = 0x712f0000,
461
+ OPC_VSIGNCOV_D = 0x712f8000,
462
+ OPC_VFADD_S = 0x71308000,
463
+ OPC_VFADD_D = 0x71310000,
464
+ OPC_VFSUB_S = 0x71328000,
465
+ OPC_VFSUB_D = 0x71330000,
466
+ OPC_VFMUL_S = 0x71388000,
467
+ OPC_VFMUL_D = 0x71390000,
468
+ OPC_VFDIV_S = 0x713a8000,
469
+ OPC_VFDIV_D = 0x713b0000,
470
+ OPC_VFMAX_S = 0x713c8000,
471
+ OPC_VFMAX_D = 0x713d0000,
472
+ OPC_VFMIN_S = 0x713e8000,
473
+ OPC_VFMIN_D = 0x713f0000,
474
+ OPC_VFMAXA_S = 0x71408000,
475
+ OPC_VFMAXA_D = 0x71410000,
476
+ OPC_VFMINA_S = 0x71428000,
477
+ OPC_VFMINA_D = 0x71430000,
478
+ OPC_VFCVT_H_S = 0x71460000,
479
+ OPC_VFCVT_S_D = 0x71468000,
480
+ OPC_VFFINT_S_L = 0x71480000,
481
+ OPC_VFTINT_W_D = 0x71498000,
482
+ OPC_VFTINTRM_W_D = 0x714a0000,
483
+ OPC_VFTINTRP_W_D = 0x714a8000,
484
+ OPC_VFTINTRZ_W_D = 0x714b0000,
485
+ OPC_VFTINTRNE_W_D = 0x714b8000,
486
+ OPC_VSHUF_H = 0x717a8000,
487
+ OPC_VSHUF_W = 0x717b0000,
488
+ OPC_VSHUF_D = 0x717b8000,
489
+ OPC_VSEQI_B = 0x72800000,
490
+ OPC_VSEQI_H = 0x72808000,
491
+ OPC_VSEQI_W = 0x72810000,
492
+ OPC_VSEQI_D = 0x72818000,
493
+ OPC_VSLEI_B = 0x72820000,
494
+ OPC_VSLEI_H = 0x72828000,
495
+ OPC_VSLEI_W = 0x72830000,
496
+ OPC_VSLEI_D = 0x72838000,
497
+ OPC_VSLEI_BU = 0x72840000,
498
+ OPC_VSLEI_HU = 0x72848000,
499
+ OPC_VSLEI_WU = 0x72850000,
500
+ OPC_VSLEI_DU = 0x72858000,
501
+ OPC_VSLTI_B = 0x72860000,
502
+ OPC_VSLTI_H = 0x72868000,
503
+ OPC_VSLTI_W = 0x72870000,
504
+ OPC_VSLTI_D = 0x72878000,
505
+ OPC_VSLTI_BU = 0x72880000,
506
+ OPC_VSLTI_HU = 0x72888000,
507
+ OPC_VSLTI_WU = 0x72890000,
508
+ OPC_VSLTI_DU = 0x72898000,
509
+ OPC_VADDI_BU = 0x728a0000,
510
+ OPC_VADDI_HU = 0x728a8000,
511
+ OPC_VADDI_WU = 0x728b0000,
512
+ OPC_VADDI_DU = 0x728b8000,
513
+ OPC_VSUBI_BU = 0x728c0000,
514
+ OPC_VSUBI_HU = 0x728c8000,
515
+ OPC_VSUBI_WU = 0x728d0000,
516
+ OPC_VSUBI_DU = 0x728d8000,
517
+ OPC_VBSLL_V = 0x728e0000,
518
+ OPC_VBSRL_V = 0x728e8000,
519
+ OPC_VMAXI_B = 0x72900000,
520
+ OPC_VMAXI_H = 0x72908000,
521
+ OPC_VMAXI_W = 0x72910000,
522
+ OPC_VMAXI_D = 0x72918000,
523
+ OPC_VMINI_B = 0x72920000,
524
+ OPC_VMINI_H = 0x72928000,
525
+ OPC_VMINI_W = 0x72930000,
526
+ OPC_VMINI_D = 0x72938000,
527
+ OPC_VMAXI_BU = 0x72940000,
528
+ OPC_VMAXI_HU = 0x72948000,
529
+ OPC_VMAXI_WU = 0x72950000,
530
+ OPC_VMAXI_DU = 0x72958000,
531
+ OPC_VMINI_BU = 0x72960000,
532
+ OPC_VMINI_HU = 0x72968000,
533
+ OPC_VMINI_WU = 0x72970000,
534
+ OPC_VMINI_DU = 0x72978000,
535
+ OPC_VFRSTPI_B = 0x729a0000,
536
+ OPC_VFRSTPI_H = 0x729a8000,
537
+ OPC_VCLO_B = 0x729c0000,
538
+ OPC_VCLO_H = 0x729c0400,
539
+ OPC_VCLO_W = 0x729c0800,
540
+ OPC_VCLO_D = 0x729c0c00,
541
+ OPC_VCLZ_B = 0x729c1000,
542
+ OPC_VCLZ_H = 0x729c1400,
543
+ OPC_VCLZ_W = 0x729c1800,
544
+ OPC_VCLZ_D = 0x729c1c00,
545
+ OPC_VPCNT_B = 0x729c2000,
546
+ OPC_VPCNT_H = 0x729c2400,
547
+ OPC_VPCNT_W = 0x729c2800,
548
+ OPC_VPCNT_D = 0x729c2c00,
549
+ OPC_VNEG_B = 0x729c3000,
550
+ OPC_VNEG_H = 0x729c3400,
551
+ OPC_VNEG_W = 0x729c3800,
552
+ OPC_VNEG_D = 0x729c3c00,
553
+ OPC_VMSKLTZ_B = 0x729c4000,
554
+ OPC_VMSKLTZ_H = 0x729c4400,
555
+ OPC_VMSKLTZ_W = 0x729c4800,
556
+ OPC_VMSKLTZ_D = 0x729c4c00,
557
+ OPC_VMSKGEZ_B = 0x729c5000,
558
+ OPC_VMSKNZ_B = 0x729c6000,
559
+ OPC_VSETEQZ_V = 0x729c9800,
560
+ OPC_VSETNEZ_V = 0x729c9c00,
561
+ OPC_VSETANYEQZ_B = 0x729ca000,
562
+ OPC_VSETANYEQZ_H = 0x729ca400,
563
+ OPC_VSETANYEQZ_W = 0x729ca800,
564
+ OPC_VSETANYEQZ_D = 0x729cac00,
565
+ OPC_VSETALLNEZ_B = 0x729cb000,
566
+ OPC_VSETALLNEZ_H = 0x729cb400,
567
+ OPC_VSETALLNEZ_W = 0x729cb800,
568
+ OPC_VSETALLNEZ_D = 0x729cbc00,
569
+ OPC_VFLOGB_S = 0x729cc400,
570
+ OPC_VFLOGB_D = 0x729cc800,
571
+ OPC_VFCLASS_S = 0x729cd400,
572
+ OPC_VFCLASS_D = 0x729cd800,
573
+ OPC_VFSQRT_S = 0x729ce400,
574
+ OPC_VFSQRT_D = 0x729ce800,
575
+ OPC_VFRECIP_S = 0x729cf400,
576
+ OPC_VFRECIP_D = 0x729cf800,
577
+ OPC_VFRSQRT_S = 0x729d0400,
578
+ OPC_VFRSQRT_D = 0x729d0800,
579
+ OPC_VFRINT_S = 0x729d3400,
580
+ OPC_VFRINT_D = 0x729d3800,
581
+ OPC_VFRINTRM_S = 0x729d4400,
582
+ OPC_VFRINTRM_D = 0x729d4800,
583
+ OPC_VFRINTRP_S = 0x729d5400,
584
+ OPC_VFRINTRP_D = 0x729d5800,
585
+ OPC_VFRINTRZ_S = 0x729d6400,
586
+ OPC_VFRINTRZ_D = 0x729d6800,
587
+ OPC_VFRINTRNE_S = 0x729d7400,
588
+ OPC_VFRINTRNE_D = 0x729d7800,
589
+ OPC_VFCVTL_S_H = 0x729de800,
590
+ OPC_VFCVTH_S_H = 0x729dec00,
591
+ OPC_VFCVTL_D_S = 0x729df000,
592
+ OPC_VFCVTH_D_S = 0x729df400,
593
+ OPC_VFFINT_S_W = 0x729e0000,
594
+ OPC_VFFINT_S_WU = 0x729e0400,
595
+ OPC_VFFINT_D_L = 0x729e0800,
596
+ OPC_VFFINT_D_LU = 0x729e0c00,
597
+ OPC_VFFINTL_D_W = 0x729e1000,
598
+ OPC_VFFINTH_D_W = 0x729e1400,
599
+ OPC_VFTINT_W_S = 0x729e3000,
600
+ OPC_VFTINT_L_D = 0x729e3400,
601
+ OPC_VFTINTRM_W_S = 0x729e3800,
602
+ OPC_VFTINTRM_L_D = 0x729e3c00,
603
+ OPC_VFTINTRP_W_S = 0x729e4000,
604
+ OPC_VFTINTRP_L_D = 0x729e4400,
605
+ OPC_VFTINTRZ_W_S = 0x729e4800,
606
+ OPC_VFTINTRZ_L_D = 0x729e4c00,
607
+ OPC_VFTINTRNE_W_S = 0x729e5000,
608
+ OPC_VFTINTRNE_L_D = 0x729e5400,
609
+ OPC_VFTINT_WU_S = 0x729e5800,
610
+ OPC_VFTINT_LU_D = 0x729e5c00,
611
+ OPC_VFTINTRZ_WU_S = 0x729e7000,
612
+ OPC_VFTINTRZ_LU_D = 0x729e7400,
613
+ OPC_VFTINTL_L_S = 0x729e8000,
614
+ OPC_VFTINTH_L_S = 0x729e8400,
615
+ OPC_VFTINTRML_L_S = 0x729e8800,
616
+ OPC_VFTINTRMH_L_S = 0x729e8c00,
617
+ OPC_VFTINTRPL_L_S = 0x729e9000,
618
+ OPC_VFTINTRPH_L_S = 0x729e9400,
619
+ OPC_VFTINTRZL_L_S = 0x729e9800,
620
+ OPC_VFTINTRZH_L_S = 0x729e9c00,
621
+ OPC_VFTINTRNEL_L_S = 0x729ea000,
622
+ OPC_VFTINTRNEH_L_S = 0x729ea400,
623
+ OPC_VEXTH_H_B = 0x729ee000,
624
+ OPC_VEXTH_W_H = 0x729ee400,
625
+ OPC_VEXTH_D_W = 0x729ee800,
626
+ OPC_VEXTH_Q_D = 0x729eec00,
627
+ OPC_VEXTH_HU_BU = 0x729ef000,
628
+ OPC_VEXTH_WU_HU = 0x729ef400,
629
+ OPC_VEXTH_DU_WU = 0x729ef800,
630
+ OPC_VEXTH_QU_DU = 0x729efc00,
631
+ OPC_VREPLGR2VR_B = 0x729f0000,
632
+ OPC_VREPLGR2VR_H = 0x729f0400,
633
+ OPC_VREPLGR2VR_W = 0x729f0800,
634
+ OPC_VREPLGR2VR_D = 0x729f0c00,
635
+ OPC_VROTRI_B = 0x72a02000,
636
+ OPC_VROTRI_H = 0x72a04000,
637
+ OPC_VROTRI_W = 0x72a08000,
638
+ OPC_VROTRI_D = 0x72a10000,
639
+ OPC_VSRLRI_B = 0x72a42000,
640
+ OPC_VSRLRI_H = 0x72a44000,
641
+ OPC_VSRLRI_W = 0x72a48000,
642
+ OPC_VSRLRI_D = 0x72a50000,
643
+ OPC_VSRARI_B = 0x72a82000,
644
+ OPC_VSRARI_H = 0x72a84000,
645
+ OPC_VSRARI_W = 0x72a88000,
646
+ OPC_VSRARI_D = 0x72a90000,
647
+ OPC_VINSGR2VR_B = 0x72eb8000,
648
+ OPC_VINSGR2VR_H = 0x72ebc000,
649
+ OPC_VINSGR2VR_W = 0x72ebe000,
650
+ OPC_VINSGR2VR_D = 0x72ebf000,
651
+ OPC_VPICKVE2GR_B = 0x72ef8000,
652
+ OPC_VPICKVE2GR_H = 0x72efc000,
653
+ OPC_VPICKVE2GR_W = 0x72efe000,
654
+ OPC_VPICKVE2GR_D = 0x72eff000,
655
+ OPC_VPICKVE2GR_BU = 0x72f38000,
656
+ OPC_VPICKVE2GR_HU = 0x72f3c000,
657
+ OPC_VPICKVE2GR_WU = 0x72f3e000,
658
+ OPC_VPICKVE2GR_DU = 0x72f3f000,
659
+ OPC_VREPLVEI_B = 0x72f78000,
660
+ OPC_VREPLVEI_H = 0x72f7c000,
661
+ OPC_VREPLVEI_W = 0x72f7e000,
662
+ OPC_VREPLVEI_D = 0x72f7f000,
663
+ OPC_VSLLWIL_H_B = 0x73082000,
664
+ OPC_VSLLWIL_W_H = 0x73084000,
665
+ OPC_VSLLWIL_D_W = 0x73088000,
666
+ OPC_VEXTL_Q_D = 0x73090000,
667
+ OPC_VSLLWIL_HU_BU = 0x730c2000,
668
+ OPC_VSLLWIL_WU_HU = 0x730c4000,
669
+ OPC_VSLLWIL_DU_WU = 0x730c8000,
670
+ OPC_VEXTL_QU_DU = 0x730d0000,
671
+ OPC_VBITCLRI_B = 0x73102000,
672
+ OPC_VBITCLRI_H = 0x73104000,
673
+ OPC_VBITCLRI_W = 0x73108000,
674
+ OPC_VBITCLRI_D = 0x73110000,
675
+ OPC_VBITSETI_B = 0x73142000,
676
+ OPC_VBITSETI_H = 0x73144000,
677
+ OPC_VBITSETI_W = 0x73148000,
678
+ OPC_VBITSETI_D = 0x73150000,
679
+ OPC_VBITREVI_B = 0x73182000,
680
+ OPC_VBITREVI_H = 0x73184000,
681
+ OPC_VBITREVI_W = 0x73188000,
682
+ OPC_VBITREVI_D = 0x73190000,
683
+ OPC_VSAT_B = 0x73242000,
684
+ OPC_VSAT_H = 0x73244000,
685
+ OPC_VSAT_W = 0x73248000,
686
+ OPC_VSAT_D = 0x73250000,
687
+ OPC_VSAT_BU = 0x73282000,
688
+ OPC_VSAT_HU = 0x73284000,
689
+ OPC_VSAT_WU = 0x73288000,
690
+ OPC_VSAT_DU = 0x73290000,
691
+ OPC_VSLLI_B = 0x732c2000,
692
+ OPC_VSLLI_H = 0x732c4000,
693
+ OPC_VSLLI_W = 0x732c8000,
694
+ OPC_VSLLI_D = 0x732d0000,
695
+ OPC_VSRLI_B = 0x73302000,
696
+ OPC_VSRLI_H = 0x73304000,
697
+ OPC_VSRLI_W = 0x73308000,
698
+ OPC_VSRLI_D = 0x73310000,
699
+ OPC_VSRAI_B = 0x73342000,
700
+ OPC_VSRAI_H = 0x73344000,
701
+ OPC_VSRAI_W = 0x73348000,
702
+ OPC_VSRAI_D = 0x73350000,
703
+ OPC_VSRLNI_B_H = 0x73404000,
704
+ OPC_VSRLNI_H_W = 0x73408000,
705
+ OPC_VSRLNI_W_D = 0x73410000,
706
+ OPC_VSRLNI_D_Q = 0x73420000,
707
+ OPC_VSRLRNI_B_H = 0x73444000,
708
+ OPC_VSRLRNI_H_W = 0x73448000,
709
+ OPC_VSRLRNI_W_D = 0x73450000,
710
+ OPC_VSRLRNI_D_Q = 0x73460000,
711
+ OPC_VSSRLNI_B_H = 0x73484000,
712
+ OPC_VSSRLNI_H_W = 0x73488000,
713
+ OPC_VSSRLNI_W_D = 0x73490000,
714
+ OPC_VSSRLNI_D_Q = 0x734a0000,
715
+ OPC_VSSRLNI_BU_H = 0x734c4000,
716
+ OPC_VSSRLNI_HU_W = 0x734c8000,
717
+ OPC_VSSRLNI_WU_D = 0x734d0000,
718
+ OPC_VSSRLNI_DU_Q = 0x734e0000,
719
+ OPC_VSSRLRNI_B_H = 0x73504000,
720
+ OPC_VSSRLRNI_H_W = 0x73508000,
721
+ OPC_VSSRLRNI_W_D = 0x73510000,
722
+ OPC_VSSRLRNI_D_Q = 0x73520000,
723
+ OPC_VSSRLRNI_BU_H = 0x73544000,
724
+ OPC_VSSRLRNI_HU_W = 0x73548000,
725
+ OPC_VSSRLRNI_WU_D = 0x73550000,
726
+ OPC_VSSRLRNI_DU_Q = 0x73560000,
727
+ OPC_VSRANI_B_H = 0x73584000,
728
+ OPC_VSRANI_H_W = 0x73588000,
729
+ OPC_VSRANI_W_D = 0x73590000,
730
+ OPC_VSRANI_D_Q = 0x735a0000,
731
+ OPC_VSRARNI_B_H = 0x735c4000,
732
+ OPC_VSRARNI_H_W = 0x735c8000,
733
+ OPC_VSRARNI_W_D = 0x735d0000,
734
+ OPC_VSRARNI_D_Q = 0x735e0000,
735
+ OPC_VSSRANI_B_H = 0x73604000,
736
+ OPC_VSSRANI_H_W = 0x73608000,
737
+ OPC_VSSRANI_W_D = 0x73610000,
738
+ OPC_VSSRANI_D_Q = 0x73620000,
739
+ OPC_VSSRANI_BU_H = 0x73644000,
740
+ OPC_VSSRANI_HU_W = 0x73648000,
741
+ OPC_VSSRANI_WU_D = 0x73650000,
742
+ OPC_VSSRANI_DU_Q = 0x73660000,
743
+ OPC_VSSRARNI_B_H = 0x73684000,
744
+ OPC_VSSRARNI_H_W = 0x73688000,
745
+ OPC_VSSRARNI_W_D = 0x73690000,
746
+ OPC_VSSRARNI_D_Q = 0x736a0000,
747
+ OPC_VSSRARNI_BU_H = 0x736c4000,
748
+ OPC_VSSRARNI_HU_W = 0x736c8000,
749
+ OPC_VSSRARNI_WU_D = 0x736d0000,
750
+ OPC_VSSRARNI_DU_Q = 0x736e0000,
751
+ OPC_VEXTRINS_D = 0x73800000,
752
+ OPC_VEXTRINS_W = 0x73840000,
753
+ OPC_VEXTRINS_H = 0x73880000,
754
+ OPC_VEXTRINS_B = 0x738c0000,
755
+ OPC_VSHUF4I_B = 0x73900000,
756
+ OPC_VSHUF4I_H = 0x73940000,
757
+ OPC_VSHUF4I_W = 0x73980000,
758
+ OPC_VSHUF4I_D = 0x739c0000,
759
+ OPC_VBITSELI_B = 0x73c40000,
760
+ OPC_VANDI_B = 0x73d00000,
761
+ OPC_VORI_B = 0x73d40000,
762
+ OPC_VXORI_B = 0x73d80000,
763
+ OPC_VNORI_B = 0x73dc0000,
764
+ OPC_VLDI = 0x73e00000,
765
+ OPC_VPERMI_W = 0x73e40000,
766
} LoongArchInsn;
767
768
static int32_t __attribute__((unused))
769
@@ -XXX,XX +XXX,XX @@ encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
770
return opc | d | j << 5 | k << 10;
771
}
13
}
772
14
773
+static int32_t __attribute__((unused))
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
774
+encode_djka_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
775
+ uint32_t a)
17
{
776
+{
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
777
+ return opc | d | j << 5 | k << 10 | a << 15;
19
int i, nb_oargs;
778
+}
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
779
+
21
ts_info(ts)->z_mask = ctx->z_mask;
780
static int32_t __attribute__((unused))
22
}
781
encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
23
}
782
uint32_t m)
24
+ return true;
783
@@ -XXX,XX +XXX,XX @@ encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
784
return opc | d | j << 5 | k << 10 | m << 16;
785
}
25
}
786
26
787
+static int32_t __attribute__((unused))
27
/*
788
+encode_djkn_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
789
+ uint32_t n)
29
fold_xi_to_x(ctx, op, 0)) {
790
+{
30
return true;
791
+ return opc | d | j << 5 | k << 10 | n << 18;
31
}
792
+}
32
- return false;
793
+
33
+ return finish_folding(ctx, op);
794
static int32_t __attribute__((unused))
795
encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
796
{
797
return opc | d | k << 10;
798
}
34
}
799
35
800
+static int32_t __attribute__((unused))
36
/* We cannot as yet do_constant_folding with vectors. */
801
+encode_cdvj_insn(LoongArchInsn opc, TCGReg cd, TCGReg vj)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
802
+{
38
fold_xi_to_x(ctx, op, 0)) {
803
+ tcg_debug_assert(cd >= 0 && cd <= 0x7);
39
return true;
804
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
40
}
805
+ return encode_dj_slots(opc, cd, vj & 0x1f);
41
- return false;
806
+}
42
+ return finish_folding(ctx, op);
807
+
808
static int32_t __attribute__((unused))
809
encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
810
{
811
@@ -XXX,XX +XXX,XX @@ encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
812
return encode_dj_slots(opc, d, sj20 & 0xfffff);
813
}
43
}
814
44
815
+static int32_t __attribute__((unused))
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
816
+encode_dvjuk1_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk1)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
817
+{
47
op->args[4] = arg_new_constant(ctx, bl);
818
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
48
op->args[5] = arg_new_constant(ctx, bh);
819
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
49
}
820
+ tcg_debug_assert(uk1 <= 0x1);
50
- return false;
821
+ return encode_djk_slots(opc, d, vj & 0x1f, uk1);
51
+ return finish_folding(ctx, op);
822
+}
823
+
824
+static int32_t __attribute__((unused))
825
+encode_dvjuk2_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk2)
826
+{
827
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
828
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
829
+ tcg_debug_assert(uk2 <= 0x3);
830
+ return encode_djk_slots(opc, d, vj & 0x1f, uk2);
831
+}
832
+
833
+static int32_t __attribute__((unused))
834
+encode_dvjuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk3)
835
+{
836
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
837
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
838
+ tcg_debug_assert(uk3 <= 0x7);
839
+ return encode_djk_slots(opc, d, vj & 0x1f, uk3);
840
+}
841
+
842
+static int32_t __attribute__((unused))
843
+encode_dvjuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk4)
844
+{
845
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
846
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
847
+ tcg_debug_assert(uk4 <= 0xf);
848
+ return encode_djk_slots(opc, d, vj & 0x1f, uk4);
849
+}
850
+
851
static int32_t __attribute__((unused))
852
encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
853
{
854
@@ -XXX,XX +XXX,XX @@ encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
855
return encode_d_slot(opc, ud15);
856
}
52
}
857
53
858
+static int32_t __attribute__((unused))
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
859
+encode_vdj_insn(LoongArchInsn opc, TCGReg vd, TCGReg j)
860
+{
861
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
862
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
863
+ return encode_dj_slots(opc, vd & 0x1f, j);
864
+}
865
+
866
+static int32_t __attribute__((unused))
867
+encode_vdjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, TCGReg k)
868
+{
869
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
870
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
871
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
872
+ return encode_djk_slots(opc, vd & 0x1f, j, k);
873
+}
874
+
875
+static int32_t __attribute__((unused))
876
+encode_vdjsk10_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk10)
877
+{
878
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
879
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
880
+ tcg_debug_assert(sk10 >= -0x200 && sk10 <= 0x1ff);
881
+ return encode_djk_slots(opc, vd & 0x1f, j, sk10 & 0x3ff);
882
+}
883
+
884
+static int32_t __attribute__((unused))
885
+encode_vdjsk11_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk11)
886
+{
887
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
888
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
889
+ tcg_debug_assert(sk11 >= -0x400 && sk11 <= 0x3ff);
890
+ return encode_djk_slots(opc, vd & 0x1f, j, sk11 & 0x7ff);
891
+}
892
+
893
+static int32_t __attribute__((unused))
894
+encode_vdjsk12_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk12)
895
+{
896
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
897
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
898
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
899
+ return encode_djk_slots(opc, vd & 0x1f, j, sk12 & 0xfff);
900
+}
901
+
902
+static int32_t __attribute__((unused))
903
+encode_vdjsk8un1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
904
+ uint32_t un1)
905
+{
906
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
907
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
908
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
909
+ tcg_debug_assert(un1 <= 0x1);
910
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un1);
911
+}
912
+
913
+static int32_t __attribute__((unused))
914
+encode_vdjsk8un2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
915
+ uint32_t un2)
916
+{
917
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
918
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
919
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
920
+ tcg_debug_assert(un2 <= 0x3);
921
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un2);
922
+}
923
+
924
+static int32_t __attribute__((unused))
925
+encode_vdjsk8un3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
926
+ uint32_t un3)
927
+{
928
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
929
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
930
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
931
+ tcg_debug_assert(un3 <= 0x7);
932
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un3);
933
+}
934
+
935
+static int32_t __attribute__((unused))
936
+encode_vdjsk8un4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
937
+ uint32_t un4)
938
+{
939
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
940
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
941
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
942
+ tcg_debug_assert(un4 <= 0xf);
943
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un4);
944
+}
945
+
946
+static int32_t __attribute__((unused))
947
+encode_vdjsk9_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk9)
948
+{
949
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
950
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
951
+ tcg_debug_assert(sk9 >= -0x100 && sk9 <= 0xff);
952
+ return encode_djk_slots(opc, vd & 0x1f, j, sk9 & 0x1ff);
953
+}
954
+
955
+static int32_t __attribute__((unused))
956
+encode_vdjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk1)
957
+{
958
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
959
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
960
+ tcg_debug_assert(uk1 <= 0x1);
961
+ return encode_djk_slots(opc, vd & 0x1f, j, uk1);
962
+}
963
+
964
+static int32_t __attribute__((unused))
965
+encode_vdjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk2)
966
+{
967
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
968
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
969
+ tcg_debug_assert(uk2 <= 0x3);
970
+ return encode_djk_slots(opc, vd & 0x1f, j, uk2);
971
+}
972
+
973
+static int32_t __attribute__((unused))
974
+encode_vdjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk3)
975
+{
976
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
977
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
978
+ tcg_debug_assert(uk3 <= 0x7);
979
+ return encode_djk_slots(opc, vd & 0x1f, j, uk3);
980
+}
981
+
982
+static int32_t __attribute__((unused))
983
+encode_vdjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk4)
984
+{
985
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
986
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
987
+ tcg_debug_assert(uk4 <= 0xf);
988
+ return encode_djk_slots(opc, vd & 0x1f, j, uk4);
989
+}
990
+
991
+static int32_t __attribute__((unused))
992
+encode_vdsj13_insn(LoongArchInsn opc, TCGReg vd, int32_t sj13)
993
+{
994
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
995
+ tcg_debug_assert(sj13 >= -0x1000 && sj13 <= 0xfff);
996
+ return encode_dj_slots(opc, vd & 0x1f, sj13 & 0x1fff);
997
+}
998
+
999
+static int32_t __attribute__((unused))
1000
+encode_vdvj_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj)
1001
+{
1002
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1003
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1004
+ return encode_dj_slots(opc, vd & 0x1f, vj & 0x1f);
1005
+}
1006
+
1007
+static int32_t __attribute__((unused))
1008
+encode_vdvjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg k)
1009
+{
1010
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1011
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1012
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
1013
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, k);
1014
+}
1015
+
1016
+static int32_t __attribute__((unused))
1017
+encode_vdvjsk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, int32_t sk5)
1018
+{
1019
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1020
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1021
+ tcg_debug_assert(sk5 >= -0x10 && sk5 <= 0xf);
1022
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, sk5 & 0x1f);
1023
+}
1024
+
1025
+static int32_t __attribute__((unused))
1026
+encode_vdvjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk1)
1027
+{
1028
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1029
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1030
+ tcg_debug_assert(uk1 <= 0x1);
1031
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk1);
1032
+}
1033
+
1034
+static int32_t __attribute__((unused))
1035
+encode_vdvjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk2)
1036
+{
1037
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1038
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1039
+ tcg_debug_assert(uk2 <= 0x3);
1040
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk2);
1041
+}
1042
+
1043
+static int32_t __attribute__((unused))
1044
+encode_vdvjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk3)
1045
+{
1046
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1047
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1048
+ tcg_debug_assert(uk3 <= 0x7);
1049
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk3);
1050
+}
1051
+
1052
+static int32_t __attribute__((unused))
1053
+encode_vdvjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk4)
1054
+{
1055
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1056
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1057
+ tcg_debug_assert(uk4 <= 0xf);
1058
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk4);
1059
+}
1060
+
1061
+static int32_t __attribute__((unused))
1062
+encode_vdvjuk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk5)
1063
+{
1064
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1065
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1066
+ tcg_debug_assert(uk5 <= 0x1f);
1067
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk5);
1068
+}
1069
+
1070
+static int32_t __attribute__((unused))
1071
+encode_vdvjuk6_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk6)
1072
+{
1073
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1074
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1075
+ tcg_debug_assert(uk6 <= 0x3f);
1076
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk6);
1077
+}
1078
+
1079
+static int32_t __attribute__((unused))
1080
+encode_vdvjuk7_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk7)
1081
+{
1082
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1083
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1084
+ tcg_debug_assert(uk7 <= 0x7f);
1085
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk7);
1086
+}
1087
+
1088
+static int32_t __attribute__((unused))
1089
+encode_vdvjuk8_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk8)
1090
+{
1091
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1092
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1093
+ tcg_debug_assert(uk8 <= 0xff);
1094
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk8);
1095
+}
1096
+
1097
+static int32_t __attribute__((unused))
1098
+encode_vdvjvk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk)
1099
+{
1100
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1101
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1102
+ tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
1103
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f);
1104
+}
1105
+
1106
+static int32_t __attribute__((unused))
1107
+encode_vdvjvkva_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk,
1108
+ TCGReg va)
1109
+{
1110
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1111
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1112
+ tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
1113
+ tcg_debug_assert(va >= 0x20 && va <= 0x3f);
1114
+ return encode_djka_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f, va & 0x1f);
1115
+}
1116
+
1117
/* Emits the `clz.w d, j` instruction. */
1118
static void __attribute__((unused))
1119
tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
1120
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
1121
tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
1122
}
1123
1124
+/* Emits the `vfmadd.s vd, vj, vk, va` instruction. */
1125
+static void __attribute__((unused))
1126
+tcg_out_opc_vfmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1127
+{
1128
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_S, vd, vj, vk, va));
1129
+}
1130
+
1131
+/* Emits the `vfmadd.d vd, vj, vk, va` instruction. */
1132
+static void __attribute__((unused))
1133
+tcg_out_opc_vfmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1134
+{
1135
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_D, vd, vj, vk, va));
1136
+}
1137
+
1138
+/* Emits the `vfmsub.s vd, vj, vk, va` instruction. */
1139
+static void __attribute__((unused))
1140
+tcg_out_opc_vfmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1141
+{
1142
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_S, vd, vj, vk, va));
1143
+}
1144
+
1145
+/* Emits the `vfmsub.d vd, vj, vk, va` instruction. */
1146
+static void __attribute__((unused))
1147
+tcg_out_opc_vfmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1148
+{
1149
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_D, vd, vj, vk, va));
1150
+}
1151
+
1152
+/* Emits the `vfnmadd.s vd, vj, vk, va` instruction. */
1153
+static void __attribute__((unused))
1154
+tcg_out_opc_vfnmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1155
+{
1156
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_S, vd, vj, vk, va));
1157
+}
1158
+
1159
+/* Emits the `vfnmadd.d vd, vj, vk, va` instruction. */
1160
+static void __attribute__((unused))
1161
+tcg_out_opc_vfnmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1162
+{
1163
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_D, vd, vj, vk, va));
1164
+}
1165
+
1166
+/* Emits the `vfnmsub.s vd, vj, vk, va` instruction. */
1167
+static void __attribute__((unused))
1168
+tcg_out_opc_vfnmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1169
+{
1170
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_S, vd, vj, vk, va));
1171
+}
1172
+
1173
+/* Emits the `vfnmsub.d vd, vj, vk, va` instruction. */
1174
+static void __attribute__((unused))
1175
+tcg_out_opc_vfnmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1176
+{
1177
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_D, vd, vj, vk, va));
1178
+}
1179
+
1180
+/* Emits the `vfcmp.caf.s vd, vj, vk` instruction. */
1181
+static void __attribute__((unused))
1182
+tcg_out_opc_vfcmp_caf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1183
+{
1184
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_S, vd, vj, vk));
1185
+}
1186
+
1187
+/* Emits the `vfcmp.saf.s vd, vj, vk` instruction. */
1188
+static void __attribute__((unused))
1189
+tcg_out_opc_vfcmp_saf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1190
+{
1191
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_S, vd, vj, vk));
1192
+}
1193
+
1194
+/* Emits the `vfcmp.clt.s vd, vj, vk` instruction. */
1195
+static void __attribute__((unused))
1196
+tcg_out_opc_vfcmp_clt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1197
+{
1198
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_S, vd, vj, vk));
1199
+}
1200
+
1201
+/* Emits the `vfcmp.slt.s vd, vj, vk` instruction. */
1202
+static void __attribute__((unused))
1203
+tcg_out_opc_vfcmp_slt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1204
+{
1205
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_S, vd, vj, vk));
1206
+}
1207
+
1208
+/* Emits the `vfcmp.ceq.s vd, vj, vk` instruction. */
1209
+static void __attribute__((unused))
1210
+tcg_out_opc_vfcmp_ceq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1211
+{
1212
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_S, vd, vj, vk));
1213
+}
1214
+
1215
+/* Emits the `vfcmp.seq.s vd, vj, vk` instruction. */
1216
+static void __attribute__((unused))
1217
+tcg_out_opc_vfcmp_seq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1218
+{
1219
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_S, vd, vj, vk));
1220
+}
1221
+
1222
+/* Emits the `vfcmp.cle.s vd, vj, vk` instruction. */
1223
+static void __attribute__((unused))
1224
+tcg_out_opc_vfcmp_cle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1225
+{
1226
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_S, vd, vj, vk));
1227
+}
1228
+
1229
+/* Emits the `vfcmp.sle.s vd, vj, vk` instruction. */
1230
+static void __attribute__((unused))
1231
+tcg_out_opc_vfcmp_sle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1232
+{
1233
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_S, vd, vj, vk));
1234
+}
1235
+
1236
+/* Emits the `vfcmp.cun.s vd, vj, vk` instruction. */
1237
+static void __attribute__((unused))
1238
+tcg_out_opc_vfcmp_cun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1239
+{
1240
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_S, vd, vj, vk));
1241
+}
1242
+
1243
+/* Emits the `vfcmp.sun.s vd, vj, vk` instruction. */
1244
+static void __attribute__((unused))
1245
+tcg_out_opc_vfcmp_sun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1246
+{
1247
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_S, vd, vj, vk));
1248
+}
1249
+
1250
+/* Emits the `vfcmp.cult.s vd, vj, vk` instruction. */
1251
+static void __attribute__((unused))
1252
+tcg_out_opc_vfcmp_cult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1253
+{
1254
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_S, vd, vj, vk));
1255
+}
1256
+
1257
+/* Emits the `vfcmp.sult.s vd, vj, vk` instruction. */
1258
+static void __attribute__((unused))
1259
+tcg_out_opc_vfcmp_sult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1260
+{
1261
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_S, vd, vj, vk));
1262
+}
1263
+
1264
+/* Emits the `vfcmp.cueq.s vd, vj, vk` instruction. */
1265
+static void __attribute__((unused))
1266
+tcg_out_opc_vfcmp_cueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1267
+{
1268
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_S, vd, vj, vk));
1269
+}
1270
+
1271
+/* Emits the `vfcmp.sueq.s vd, vj, vk` instruction. */
1272
+static void __attribute__((unused))
1273
+tcg_out_opc_vfcmp_sueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1274
+{
1275
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_S, vd, vj, vk));
1276
+}
1277
+
1278
+/* Emits the `vfcmp.cule.s vd, vj, vk` instruction. */
1279
+static void __attribute__((unused))
1280
+tcg_out_opc_vfcmp_cule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1281
+{
1282
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_S, vd, vj, vk));
1283
+}
1284
+
1285
+/* Emits the `vfcmp.sule.s vd, vj, vk` instruction. */
1286
+static void __attribute__((unused))
1287
+tcg_out_opc_vfcmp_sule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1288
+{
1289
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_S, vd, vj, vk));
1290
+}
1291
+
1292
+/* Emits the `vfcmp.cne.s vd, vj, vk` instruction. */
1293
+static void __attribute__((unused))
1294
+tcg_out_opc_vfcmp_cne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1295
+{
1296
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_S, vd, vj, vk));
1297
+}
1298
+
1299
+/* Emits the `vfcmp.sne.s vd, vj, vk` instruction. */
1300
+static void __attribute__((unused))
1301
+tcg_out_opc_vfcmp_sne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1302
+{
1303
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_S, vd, vj, vk));
1304
+}
1305
+
1306
+/* Emits the `vfcmp.cor.s vd, vj, vk` instruction. */
1307
+static void __attribute__((unused))
1308
+tcg_out_opc_vfcmp_cor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1309
+{
1310
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_S, vd, vj, vk));
1311
+}
1312
+
1313
+/* Emits the `vfcmp.sor.s vd, vj, vk` instruction. */
1314
+static void __attribute__((unused))
1315
+tcg_out_opc_vfcmp_sor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1316
+{
1317
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_S, vd, vj, vk));
1318
+}
1319
+
1320
+/* Emits the `vfcmp.cune.s vd, vj, vk` instruction. */
1321
+static void __attribute__((unused))
1322
+tcg_out_opc_vfcmp_cune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1323
+{
1324
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_S, vd, vj, vk));
1325
+}
1326
+
1327
+/* Emits the `vfcmp.sune.s vd, vj, vk` instruction. */
1328
+static void __attribute__((unused))
1329
+tcg_out_opc_vfcmp_sune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1330
+{
1331
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_S, vd, vj, vk));
1332
+}
1333
+
1334
+/* Emits the `vfcmp.caf.d vd, vj, vk` instruction. */
1335
+static void __attribute__((unused))
1336
+tcg_out_opc_vfcmp_caf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1337
+{
1338
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_D, vd, vj, vk));
1339
+}
1340
+
1341
+/* Emits the `vfcmp.saf.d vd, vj, vk` instruction. */
1342
+static void __attribute__((unused))
1343
+tcg_out_opc_vfcmp_saf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1344
+{
1345
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_D, vd, vj, vk));
1346
+}
1347
+
1348
+/* Emits the `vfcmp.clt.d vd, vj, vk` instruction. */
1349
+static void __attribute__((unused))
1350
+tcg_out_opc_vfcmp_clt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1351
+{
1352
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_D, vd, vj, vk));
1353
+}
1354
+
1355
+/* Emits the `vfcmp.slt.d vd, vj, vk` instruction. */
1356
+static void __attribute__((unused))
1357
+tcg_out_opc_vfcmp_slt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1358
+{
1359
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_D, vd, vj, vk));
1360
+}
1361
+
1362
+/* Emits the `vfcmp.ceq.d vd, vj, vk` instruction. */
1363
+static void __attribute__((unused))
1364
+tcg_out_opc_vfcmp_ceq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1365
+{
1366
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_D, vd, vj, vk));
1367
+}
1368
+
1369
+/* Emits the `vfcmp.seq.d vd, vj, vk` instruction. */
1370
+static void __attribute__((unused))
1371
+tcg_out_opc_vfcmp_seq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1372
+{
1373
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_D, vd, vj, vk));
1374
+}
1375
+
1376
+/* Emits the `vfcmp.cle.d vd, vj, vk` instruction. */
1377
+static void __attribute__((unused))
1378
+tcg_out_opc_vfcmp_cle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1379
+{
1380
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_D, vd, vj, vk));
1381
+}
1382
+
1383
+/* Emits the `vfcmp.sle.d vd, vj, vk` instruction. */
1384
+static void __attribute__((unused))
1385
+tcg_out_opc_vfcmp_sle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1386
+{
1387
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_D, vd, vj, vk));
1388
+}
1389
+
1390
+/* Emits the `vfcmp.cun.d vd, vj, vk` instruction. */
1391
+static void __attribute__((unused))
1392
+tcg_out_opc_vfcmp_cun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1393
+{
1394
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_D, vd, vj, vk));
1395
+}
1396
+
1397
+/* Emits the `vfcmp.sun.d vd, vj, vk` instruction. */
1398
+static void __attribute__((unused))
1399
+tcg_out_opc_vfcmp_sun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1400
+{
1401
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_D, vd, vj, vk));
1402
+}
1403
+
1404
+/* Emits the `vfcmp.cult.d vd, vj, vk` instruction. */
1405
+static void __attribute__((unused))
1406
+tcg_out_opc_vfcmp_cult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1407
+{
1408
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_D, vd, vj, vk));
1409
+}
1410
+
1411
+/* Emits the `vfcmp.sult.d vd, vj, vk` instruction. */
1412
+static void __attribute__((unused))
1413
+tcg_out_opc_vfcmp_sult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1414
+{
1415
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_D, vd, vj, vk));
1416
+}
1417
+
1418
+/* Emits the `vfcmp.cueq.d vd, vj, vk` instruction. */
1419
+static void __attribute__((unused))
1420
+tcg_out_opc_vfcmp_cueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1421
+{
1422
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_D, vd, vj, vk));
1423
+}
1424
+
1425
+/* Emits the `vfcmp.sueq.d vd, vj, vk` instruction. */
1426
+static void __attribute__((unused))
1427
+tcg_out_opc_vfcmp_sueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1428
+{
1429
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_D, vd, vj, vk));
1430
+}
1431
+
1432
+/* Emits the `vfcmp.cule.d vd, vj, vk` instruction. */
1433
+static void __attribute__((unused))
1434
+tcg_out_opc_vfcmp_cule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1435
+{
1436
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_D, vd, vj, vk));
1437
+}
1438
+
1439
+/* Emits the `vfcmp.sule.d vd, vj, vk` instruction. */
1440
+static void __attribute__((unused))
1441
+tcg_out_opc_vfcmp_sule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1442
+{
1443
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_D, vd, vj, vk));
1444
+}
1445
+
1446
+/* Emits the `vfcmp.cne.d vd, vj, vk` instruction. */
1447
+static void __attribute__((unused))
1448
+tcg_out_opc_vfcmp_cne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1449
+{
1450
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_D, vd, vj, vk));
1451
+}
1452
+
1453
+/* Emits the `vfcmp.sne.d vd, vj, vk` instruction. */
1454
+static void __attribute__((unused))
1455
+tcg_out_opc_vfcmp_sne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1456
+{
1457
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_D, vd, vj, vk));
1458
+}
1459
+
1460
+/* Emits the `vfcmp.cor.d vd, vj, vk` instruction. */
1461
+static void __attribute__((unused))
1462
+tcg_out_opc_vfcmp_cor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1463
+{
1464
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_D, vd, vj, vk));
1465
+}
1466
+
1467
+/* Emits the `vfcmp.sor.d vd, vj, vk` instruction. */
1468
+static void __attribute__((unused))
1469
+tcg_out_opc_vfcmp_sor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1470
+{
1471
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_D, vd, vj, vk));
1472
+}
1473
+
1474
+/* Emits the `vfcmp.cune.d vd, vj, vk` instruction. */
1475
+static void __attribute__((unused))
1476
+tcg_out_opc_vfcmp_cune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1477
+{
1478
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_D, vd, vj, vk));
1479
+}
1480
+
1481
+/* Emits the `vfcmp.sune.d vd, vj, vk` instruction. */
1482
+static void __attribute__((unused))
1483
+tcg_out_opc_vfcmp_sune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1484
+{
1485
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_D, vd, vj, vk));
1486
+}
1487
+
1488
+/* Emits the `vbitsel.v vd, vj, vk, va` instruction. */
1489
+static void __attribute__((unused))
1490
+tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1491
+{
1492
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VBITSEL_V, vd, vj, vk, va));
1493
+}
1494
+
1495
+/* Emits the `vshuf.b vd, vj, vk, va` instruction. */
1496
+static void __attribute__((unused))
1497
+tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1498
+{
1499
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VSHUF_B, vd, vj, vk, va));
1500
+}
1501
+
1502
/* Emits the `addu16i.d d, j, sk16` instruction. */
1503
static void __attribute__((unused))
1504
tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1505
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
1506
tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
1507
}
1508
1509
+/* Emits the `vld vd, j, sk12` instruction. */
1510
+static void __attribute__((unused))
1511
+tcg_out_opc_vld(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1512
+{
1513
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLD, vd, j, sk12));
1514
+}
1515
+
1516
+/* Emits the `vst vd, j, sk12` instruction. */
1517
+static void __attribute__((unused))
1518
+tcg_out_opc_vst(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1519
+{
1520
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VST, vd, j, sk12));
1521
+}
1522
+
1523
+/* Emits the `vldrepl.d vd, j, sk9` instruction. */
1524
+static void __attribute__((unused))
1525
+tcg_out_opc_vldrepl_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk9)
1526
+{
1527
+ tcg_out32(s, encode_vdjsk9_insn(OPC_VLDREPL_D, vd, j, sk9));
1528
+}
1529
+
1530
+/* Emits the `vldrepl.w vd, j, sk10` instruction. */
1531
+static void __attribute__((unused))
1532
+tcg_out_opc_vldrepl_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk10)
1533
+{
1534
+ tcg_out32(s, encode_vdjsk10_insn(OPC_VLDREPL_W, vd, j, sk10));
1535
+}
1536
+
1537
+/* Emits the `vldrepl.h vd, j, sk11` instruction. */
1538
+static void __attribute__((unused))
1539
+tcg_out_opc_vldrepl_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk11)
1540
+{
1541
+ tcg_out32(s, encode_vdjsk11_insn(OPC_VLDREPL_H, vd, j, sk11));
1542
+}
1543
+
1544
+/* Emits the `vldrepl.b vd, j, sk12` instruction. */
1545
+static void __attribute__((unused))
1546
+tcg_out_opc_vldrepl_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1547
+{
1548
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLDREPL_B, vd, j, sk12));
1549
+}
1550
+
1551
+/* Emits the `vstelm.d vd, j, sk8, un1` instruction. */
1552
+static void __attribute__((unused))
1553
+tcg_out_opc_vstelm_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1554
+ uint32_t un1)
1555
+{
1556
+ tcg_out32(s, encode_vdjsk8un1_insn(OPC_VSTELM_D, vd, j, sk8, un1));
1557
+}
1558
+
1559
+/* Emits the `vstelm.w vd, j, sk8, un2` instruction. */
1560
+static void __attribute__((unused))
1561
+tcg_out_opc_vstelm_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1562
+ uint32_t un2)
1563
+{
1564
+ tcg_out32(s, encode_vdjsk8un2_insn(OPC_VSTELM_W, vd, j, sk8, un2));
1565
+}
1566
+
1567
+/* Emits the `vstelm.h vd, j, sk8, un3` instruction. */
1568
+static void __attribute__((unused))
1569
+tcg_out_opc_vstelm_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1570
+ uint32_t un3)
1571
+{
1572
+ tcg_out32(s, encode_vdjsk8un3_insn(OPC_VSTELM_H, vd, j, sk8, un3));
1573
+}
1574
+
1575
+/* Emits the `vstelm.b vd, j, sk8, un4` instruction. */
1576
+static void __attribute__((unused))
1577
+tcg_out_opc_vstelm_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1578
+ uint32_t un4)
1579
+{
1580
+ tcg_out32(s, encode_vdjsk8un4_insn(OPC_VSTELM_B, vd, j, sk8, un4));
1581
+}
1582
+
1583
/* Emits the `ldx.b d, j, k` instruction. */
1584
static void __attribute__((unused))
1585
tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1586
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1587
tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
1588
}
1589
1590
+/* Emits the `vldx vd, j, k` instruction. */
1591
+static void __attribute__((unused))
1592
+tcg_out_opc_vldx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
1593
+{
1594
+ tcg_out32(s, encode_vdjk_insn(OPC_VLDX, vd, j, k));
1595
+}
1596
+
1597
+/* Emits the `vstx vd, j, k` instruction. */
1598
+static void __attribute__((unused))
1599
+tcg_out_opc_vstx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
1600
+{
1601
+ tcg_out32(s, encode_vdjk_insn(OPC_VSTX, vd, j, k));
1602
+}
1603
+
1604
/* Emits the `dbar ud15` instruction. */
1605
static void __attribute__((unused))
1606
tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
1607
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1608
tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
1609
}
1610
1611
+/* Emits the `vseq.b vd, vj, vk` instruction. */
1612
+static void __attribute__((unused))
1613
+tcg_out_opc_vseq_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1614
+{
1615
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_B, vd, vj, vk));
1616
+}
1617
+
1618
+/* Emits the `vseq.h vd, vj, vk` instruction. */
1619
+static void __attribute__((unused))
1620
+tcg_out_opc_vseq_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1621
+{
1622
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_H, vd, vj, vk));
1623
+}
1624
+
1625
+/* Emits the `vseq.w vd, vj, vk` instruction. */
1626
+static void __attribute__((unused))
1627
+tcg_out_opc_vseq_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1628
+{
1629
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_W, vd, vj, vk));
1630
+}
1631
+
1632
+/* Emits the `vseq.d vd, vj, vk` instruction. */
1633
+static void __attribute__((unused))
1634
+tcg_out_opc_vseq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1635
+{
1636
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_D, vd, vj, vk));
1637
+}
1638
+
1639
+/* Emits the `vsle.b vd, vj, vk` instruction. */
1640
+static void __attribute__((unused))
1641
+tcg_out_opc_vsle_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1642
+{
1643
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_B, vd, vj, vk));
1644
+}
1645
+
1646
+/* Emits the `vsle.h vd, vj, vk` instruction. */
1647
+static void __attribute__((unused))
1648
+tcg_out_opc_vsle_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1649
+{
1650
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_H, vd, vj, vk));
1651
+}
1652
+
1653
+/* Emits the `vsle.w vd, vj, vk` instruction. */
1654
+static void __attribute__((unused))
1655
+tcg_out_opc_vsle_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1656
+{
1657
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_W, vd, vj, vk));
1658
+}
1659
+
1660
+/* Emits the `vsle.d vd, vj, vk` instruction. */
1661
+static void __attribute__((unused))
1662
+tcg_out_opc_vsle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1663
+{
1664
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_D, vd, vj, vk));
1665
+}
1666
+
1667
+/* Emits the `vsle.bu vd, vj, vk` instruction. */
1668
+static void __attribute__((unused))
1669
+tcg_out_opc_vsle_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1670
+{
1671
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_BU, vd, vj, vk));
1672
+}
1673
+
1674
+/* Emits the `vsle.hu vd, vj, vk` instruction. */
1675
+static void __attribute__((unused))
1676
+tcg_out_opc_vsle_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1677
+{
1678
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_HU, vd, vj, vk));
1679
+}
1680
+
1681
+/* Emits the `vsle.wu vd, vj, vk` instruction. */
1682
+static void __attribute__((unused))
1683
+tcg_out_opc_vsle_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1684
+{
1685
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_WU, vd, vj, vk));
1686
+}
1687
+
1688
+/* Emits the `vsle.du vd, vj, vk` instruction. */
1689
+static void __attribute__((unused))
1690
+tcg_out_opc_vsle_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1691
+{
1692
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_DU, vd, vj, vk));
1693
+}
1694
+
1695
+/* Emits the `vslt.b vd, vj, vk` instruction. */
1696
+static void __attribute__((unused))
1697
+tcg_out_opc_vslt_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1698
+{
1699
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_B, vd, vj, vk));
1700
+}
1701
+
1702
+/* Emits the `vslt.h vd, vj, vk` instruction. */
1703
+static void __attribute__((unused))
1704
+tcg_out_opc_vslt_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1705
+{
1706
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_H, vd, vj, vk));
1707
+}
1708
+
1709
+/* Emits the `vslt.w vd, vj, vk` instruction. */
1710
+static void __attribute__((unused))
1711
+tcg_out_opc_vslt_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1712
+{
1713
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_W, vd, vj, vk));
1714
+}
1715
+
1716
+/* Emits the `vslt.d vd, vj, vk` instruction. */
1717
+static void __attribute__((unused))
1718
+tcg_out_opc_vslt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1719
+{
1720
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_D, vd, vj, vk));
1721
+}
1722
+
1723
+/* Emits the `vslt.bu vd, vj, vk` instruction. */
1724
+static void __attribute__((unused))
1725
+tcg_out_opc_vslt_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1726
+{
1727
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_BU, vd, vj, vk));
1728
+}
1729
+
1730
+/* Emits the `vslt.hu vd, vj, vk` instruction. */
1731
+static void __attribute__((unused))
1732
+tcg_out_opc_vslt_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1733
+{
1734
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_HU, vd, vj, vk));
1735
+}
1736
+
1737
+/* Emits the `vslt.wu vd, vj, vk` instruction. */
1738
+static void __attribute__((unused))
1739
+tcg_out_opc_vslt_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1740
+{
1741
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_WU, vd, vj, vk));
1742
+}
1743
+
1744
+/* Emits the `vslt.du vd, vj, vk` instruction. */
1745
+static void __attribute__((unused))
1746
+tcg_out_opc_vslt_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1747
+{
1748
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_DU, vd, vj, vk));
1749
+}
1750
+
1751
+/* Emits the `vadd.b vd, vj, vk` instruction. */
1752
+static void __attribute__((unused))
1753
+tcg_out_opc_vadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1754
+{
1755
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_B, vd, vj, vk));
1756
+}
1757
+
1758
+/* Emits the `vadd.h vd, vj, vk` instruction. */
1759
+static void __attribute__((unused))
1760
+tcg_out_opc_vadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1761
+{
1762
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_H, vd, vj, vk));
1763
+}
1764
+
1765
+/* Emits the `vadd.w vd, vj, vk` instruction. */
1766
+static void __attribute__((unused))
1767
+tcg_out_opc_vadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1768
+{
1769
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_W, vd, vj, vk));
1770
+}
1771
+
1772
+/* Emits the `vadd.d vd, vj, vk` instruction. */
1773
+static void __attribute__((unused))
1774
+tcg_out_opc_vadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1775
+{
1776
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_D, vd, vj, vk));
1777
+}
1778
+
1779
+/* Emits the `vsub.b vd, vj, vk` instruction. */
1780
+static void __attribute__((unused))
1781
+tcg_out_opc_vsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1782
+{
1783
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_B, vd, vj, vk));
1784
+}
1785
+
1786
+/* Emits the `vsub.h vd, vj, vk` instruction. */
1787
+static void __attribute__((unused))
1788
+tcg_out_opc_vsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1789
+{
1790
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_H, vd, vj, vk));
1791
+}
1792
+
1793
+/* Emits the `vsub.w vd, vj, vk` instruction. */
1794
+static void __attribute__((unused))
1795
+tcg_out_opc_vsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1796
+{
1797
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_W, vd, vj, vk));
1798
+}
1799
+
1800
+/* Emits the `vsub.d vd, vj, vk` instruction. */
1801
+static void __attribute__((unused))
1802
+tcg_out_opc_vsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1803
+{
1804
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_D, vd, vj, vk));
1805
+}
1806
+
1807
+/* Emits the `vaddwev.h.b vd, vj, vk` instruction. */
1808
+static void __attribute__((unused))
1809
+tcg_out_opc_vaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1810
+{
1811
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_B, vd, vj, vk));
1812
+}
1813
+
1814
+/* Emits the `vaddwev.w.h vd, vj, vk` instruction. */
1815
+static void __attribute__((unused))
1816
+tcg_out_opc_vaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1817
+{
1818
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_H, vd, vj, vk));
1819
+}
1820
+
1821
+/* Emits the `vaddwev.d.w vd, vj, vk` instruction. */
1822
+static void __attribute__((unused))
1823
+tcg_out_opc_vaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1824
+{
1825
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_W, vd, vj, vk));
1826
+}
1827
+
1828
+/* Emits the `vaddwev.q.d vd, vj, vk` instruction. */
1829
+static void __attribute__((unused))
1830
+tcg_out_opc_vaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1831
+{
1832
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_D, vd, vj, vk));
1833
+}
1834
+
1835
+/* Emits the `vsubwev.h.b vd, vj, vk` instruction. */
1836
+static void __attribute__((unused))
1837
+tcg_out_opc_vsubwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1838
+{
1839
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_B, vd, vj, vk));
1840
+}
1841
+
1842
+/* Emits the `vsubwev.w.h vd, vj, vk` instruction. */
1843
+static void __attribute__((unused))
1844
+tcg_out_opc_vsubwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1845
+{
1846
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_H, vd, vj, vk));
1847
+}
1848
+
1849
+/* Emits the `vsubwev.d.w vd, vj, vk` instruction. */
1850
+static void __attribute__((unused))
1851
+tcg_out_opc_vsubwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1852
+{
1853
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_W, vd, vj, vk));
1854
+}
1855
+
1856
+/* Emits the `vsubwev.q.d vd, vj, vk` instruction. */
1857
+static void __attribute__((unused))
1858
+tcg_out_opc_vsubwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1859
+{
1860
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_D, vd, vj, vk));
1861
+}
1862
+
1863
+/* Emits the `vaddwod.h.b vd, vj, vk` instruction. */
1864
+static void __attribute__((unused))
1865
+tcg_out_opc_vaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1866
+{
1867
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_B, vd, vj, vk));
1868
+}
1869
+
1870
+/* Emits the `vaddwod.w.h vd, vj, vk` instruction. */
1871
+static void __attribute__((unused))
1872
+tcg_out_opc_vaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1873
+{
1874
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_H, vd, vj, vk));
1875
+}
1876
+
1877
+/* Emits the `vaddwod.d.w vd, vj, vk` instruction. */
1878
+static void __attribute__((unused))
1879
+tcg_out_opc_vaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1880
+{
1881
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_W, vd, vj, vk));
1882
+}
1883
+
1884
+/* Emits the `vaddwod.q.d vd, vj, vk` instruction. */
1885
+static void __attribute__((unused))
1886
+tcg_out_opc_vaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1887
+{
1888
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_D, vd, vj, vk));
1889
+}
1890
+
1891
+/* Emits the `vsubwod.h.b vd, vj, vk` instruction. */
1892
+static void __attribute__((unused))
1893
+tcg_out_opc_vsubwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1894
+{
1895
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_B, vd, vj, vk));
1896
+}
1897
+
1898
+/* Emits the `vsubwod.w.h vd, vj, vk` instruction. */
1899
+static void __attribute__((unused))
1900
+tcg_out_opc_vsubwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1901
+{
1902
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_H, vd, vj, vk));
1903
+}
1904
+
1905
+/* Emits the `vsubwod.d.w vd, vj, vk` instruction. */
1906
+static void __attribute__((unused))
1907
+tcg_out_opc_vsubwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1908
+{
1909
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_W, vd, vj, vk));
1910
+}
1911
+
1912
+/* Emits the `vsubwod.q.d vd, vj, vk` instruction. */
1913
+static void __attribute__((unused))
1914
+tcg_out_opc_vsubwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1915
+{
1916
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_D, vd, vj, vk));
1917
+}
1918
+
1919
+/* Emits the `vaddwev.h.bu vd, vj, vk` instruction. */
1920
+static void __attribute__((unused))
1921
+tcg_out_opc_vaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1922
+{
1923
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU, vd, vj, vk));
1924
+}
1925
+
1926
+/* Emits the `vaddwev.w.hu vd, vj, vk` instruction. */
1927
+static void __attribute__((unused))
1928
+tcg_out_opc_vaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1929
+{
1930
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU, vd, vj, vk));
1931
+}
1932
+
1933
+/* Emits the `vaddwev.d.wu vd, vj, vk` instruction. */
1934
+static void __attribute__((unused))
1935
+tcg_out_opc_vaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1936
+{
1937
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU, vd, vj, vk));
1938
+}
1939
+
1940
+/* Emits the `vaddwev.q.du vd, vj, vk` instruction. */
1941
+static void __attribute__((unused))
1942
+tcg_out_opc_vaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1943
+{
1944
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU, vd, vj, vk));
1945
+}
1946
+
1947
+/* Emits the `vsubwev.h.bu vd, vj, vk` instruction. */
1948
+static void __attribute__((unused))
1949
+tcg_out_opc_vsubwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1950
+{
1951
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_BU, vd, vj, vk));
1952
+}
1953
+
1954
+/* Emits the `vsubwev.w.hu vd, vj, vk` instruction. */
1955
+static void __attribute__((unused))
1956
+tcg_out_opc_vsubwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1957
+{
1958
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_HU, vd, vj, vk));
1959
+}
1960
+
1961
+/* Emits the `vsubwev.d.wu vd, vj, vk` instruction. */
1962
+static void __attribute__((unused))
1963
+tcg_out_opc_vsubwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1964
+{
1965
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_WU, vd, vj, vk));
1966
+}
1967
+
1968
+/* Emits the `vsubwev.q.du vd, vj, vk` instruction. */
1969
+static void __attribute__((unused))
1970
+tcg_out_opc_vsubwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1971
+{
1972
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_DU, vd, vj, vk));
1973
+}
1974
+
1975
+/* Emits the `vaddwod.h.bu vd, vj, vk` instruction. */
1976
+static void __attribute__((unused))
1977
+tcg_out_opc_vaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1978
+{
1979
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU, vd, vj, vk));
1980
+}
1981
+
1982
+/* Emits the `vaddwod.w.hu vd, vj, vk` instruction. */
1983
+static void __attribute__((unused))
1984
+tcg_out_opc_vaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1985
+{
1986
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU, vd, vj, vk));
1987
+}
1988
+
1989
+/* Emits the `vaddwod.d.wu vd, vj, vk` instruction. */
1990
+static void __attribute__((unused))
1991
+tcg_out_opc_vaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1992
+{
1993
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU, vd, vj, vk));
1994
+}
1995
+
1996
+/* Emits the `vaddwod.q.du vd, vj, vk` instruction. */
1997
+static void __attribute__((unused))
1998
+tcg_out_opc_vaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1999
+{
2000
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU, vd, vj, vk));
2001
+}
2002
+
2003
+/* Emits the `vsubwod.h.bu vd, vj, vk` instruction. */
2004
+static void __attribute__((unused))
2005
+tcg_out_opc_vsubwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2006
+{
2007
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_BU, vd, vj, vk));
2008
+}
2009
+
2010
+/* Emits the `vsubwod.w.hu vd, vj, vk` instruction. */
2011
+static void __attribute__((unused))
2012
+tcg_out_opc_vsubwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2013
+{
2014
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_HU, vd, vj, vk));
2015
+}
2016
+
2017
+/* Emits the `vsubwod.d.wu vd, vj, vk` instruction. */
2018
+static void __attribute__((unused))
2019
+tcg_out_opc_vsubwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2020
+{
2021
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_WU, vd, vj, vk));
2022
+}
2023
+
2024
+/* Emits the `vsubwod.q.du vd, vj, vk` instruction. */
2025
+static void __attribute__((unused))
2026
+tcg_out_opc_vsubwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2027
+{
2028
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_DU, vd, vj, vk));
2029
+}
2030
+
2031
+/* Emits the `vaddwev.h.bu.b vd, vj, vk` instruction. */
2032
+static void __attribute__((unused))
2033
+tcg_out_opc_vaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2034
+{
2035
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU_B, vd, vj, vk));
2036
+}
2037
+
2038
+/* Emits the `vaddwev.w.hu.h vd, vj, vk` instruction. */
2039
+static void __attribute__((unused))
2040
+tcg_out_opc_vaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2041
+{
2042
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU_H, vd, vj, vk));
2043
+}
2044
+
2045
+/* Emits the `vaddwev.d.wu.w vd, vj, vk` instruction. */
2046
+static void __attribute__((unused))
2047
+tcg_out_opc_vaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2048
+{
2049
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU_W, vd, vj, vk));
2050
+}
2051
+
2052
+/* Emits the `vaddwev.q.du.d vd, vj, vk` instruction. */
2053
+static void __attribute__((unused))
2054
+tcg_out_opc_vaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2055
+{
2056
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU_D, vd, vj, vk));
2057
+}
2058
+
2059
+/* Emits the `vaddwod.h.bu.b vd, vj, vk` instruction. */
2060
+static void __attribute__((unused))
2061
+tcg_out_opc_vaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2062
+{
2063
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU_B, vd, vj, vk));
2064
+}
2065
+
2066
+/* Emits the `vaddwod.w.hu.h vd, vj, vk` instruction. */
2067
+static void __attribute__((unused))
2068
+tcg_out_opc_vaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2069
+{
2070
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU_H, vd, vj, vk));
2071
+}
2072
+
2073
+/* Emits the `vaddwod.d.wu.w vd, vj, vk` instruction. */
2074
+static void __attribute__((unused))
2075
+tcg_out_opc_vaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2076
+{
2077
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU_W, vd, vj, vk));
2078
+}
2079
+
2080
+/* Emits the `vaddwod.q.du.d vd, vj, vk` instruction. */
2081
+static void __attribute__((unused))
2082
+tcg_out_opc_vaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2083
+{
2084
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU_D, vd, vj, vk));
2085
+}
2086
+
2087
+/* Emits the `vsadd.b vd, vj, vk` instruction. */
2088
+static void __attribute__((unused))
2089
+tcg_out_opc_vsadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2090
+{
2091
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_B, vd, vj, vk));
2092
+}
2093
+
2094
+/* Emits the `vsadd.h vd, vj, vk` instruction. */
2095
+static void __attribute__((unused))
2096
+tcg_out_opc_vsadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2097
+{
2098
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_H, vd, vj, vk));
2099
+}
2100
+
2101
+/* Emits the `vsadd.w vd, vj, vk` instruction. */
2102
+static void __attribute__((unused))
2103
+tcg_out_opc_vsadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2104
+{
2105
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_W, vd, vj, vk));
2106
+}
2107
+
2108
+/* Emits the `vsadd.d vd, vj, vk` instruction. */
2109
+static void __attribute__((unused))
2110
+tcg_out_opc_vsadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2111
+{
2112
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_D, vd, vj, vk));
2113
+}
2114
+
2115
+/* Emits the `vssub.b vd, vj, vk` instruction. */
2116
+static void __attribute__((unused))
2117
+tcg_out_opc_vssub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2118
+{
2119
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_B, vd, vj, vk));
2120
+}
2121
+
2122
+/* Emits the `vssub.h vd, vj, vk` instruction. */
2123
+static void __attribute__((unused))
2124
+tcg_out_opc_vssub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2125
+{
2126
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_H, vd, vj, vk));
2127
+}
2128
+
2129
+/* Emits the `vssub.w vd, vj, vk` instruction. */
2130
+static void __attribute__((unused))
2131
+tcg_out_opc_vssub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2132
+{
2133
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_W, vd, vj, vk));
2134
+}
2135
+
2136
+/* Emits the `vssub.d vd, vj, vk` instruction. */
2137
+static void __attribute__((unused))
2138
+tcg_out_opc_vssub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2139
+{
2140
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_D, vd, vj, vk));
2141
+}
2142
+
2143
+/* Emits the `vsadd.bu vd, vj, vk` instruction. */
2144
+static void __attribute__((unused))
2145
+tcg_out_opc_vsadd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2146
+{
2147
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_BU, vd, vj, vk));
2148
+}
2149
+
2150
+/* Emits the `vsadd.hu vd, vj, vk` instruction. */
2151
+static void __attribute__((unused))
2152
+tcg_out_opc_vsadd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2153
+{
2154
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_HU, vd, vj, vk));
2155
+}
2156
+
2157
+/* Emits the `vsadd.wu vd, vj, vk` instruction. */
2158
+static void __attribute__((unused))
2159
+tcg_out_opc_vsadd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2160
+{
2161
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_WU, vd, vj, vk));
2162
+}
2163
+
2164
+/* Emits the `vsadd.du vd, vj, vk` instruction. */
2165
+static void __attribute__((unused))
2166
+tcg_out_opc_vsadd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2167
+{
2168
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_DU, vd, vj, vk));
2169
+}
2170
+
2171
+/* Emits the `vssub.bu vd, vj, vk` instruction. */
2172
+static void __attribute__((unused))
2173
+tcg_out_opc_vssub_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2174
+{
2175
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_BU, vd, vj, vk));
2176
+}
2177
+
2178
+/* Emits the `vssub.hu vd, vj, vk` instruction. */
2179
+static void __attribute__((unused))
2180
+tcg_out_opc_vssub_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2181
+{
2182
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_HU, vd, vj, vk));
2183
+}
2184
+
2185
+/* Emits the `vssub.wu vd, vj, vk` instruction. */
2186
+static void __attribute__((unused))
2187
+tcg_out_opc_vssub_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2188
+{
2189
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_WU, vd, vj, vk));
2190
+}
2191
+
2192
+/* Emits the `vssub.du vd, vj, vk` instruction. */
2193
+static void __attribute__((unused))
2194
+tcg_out_opc_vssub_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2195
+{
2196
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_DU, vd, vj, vk));
2197
+}
2198
+
2199
+/* Emits the `vhaddw.h.b vd, vj, vk` instruction. */
2200
+static void __attribute__((unused))
2201
+tcg_out_opc_vhaddw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2202
+{
2203
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_H_B, vd, vj, vk));
2204
+}
2205
+
2206
+/* Emits the `vhaddw.w.h vd, vj, vk` instruction. */
2207
+static void __attribute__((unused))
2208
+tcg_out_opc_vhaddw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2209
+{
2210
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_W_H, vd, vj, vk));
2211
+}
2212
+
2213
+/* Emits the `vhaddw.d.w vd, vj, vk` instruction. */
2214
+static void __attribute__((unused))
2215
+tcg_out_opc_vhaddw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2216
+{
2217
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_D_W, vd, vj, vk));
2218
+}
2219
+
2220
+/* Emits the `vhaddw.q.d vd, vj, vk` instruction. */
2221
+static void __attribute__((unused))
2222
+tcg_out_opc_vhaddw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2223
+{
2224
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_Q_D, vd, vj, vk));
2225
+}
2226
+
2227
+/* Emits the `vhsubw.h.b vd, vj, vk` instruction. */
2228
+static void __attribute__((unused))
2229
+tcg_out_opc_vhsubw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2230
+{
2231
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_H_B, vd, vj, vk));
2232
+}
2233
+
2234
+/* Emits the `vhsubw.w.h vd, vj, vk` instruction. */
2235
+static void __attribute__((unused))
2236
+tcg_out_opc_vhsubw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2237
+{
2238
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_W_H, vd, vj, vk));
2239
+}
2240
+
2241
+/* Emits the `vhsubw.d.w vd, vj, vk` instruction. */
2242
+static void __attribute__((unused))
2243
+tcg_out_opc_vhsubw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2244
+{
2245
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_D_W, vd, vj, vk));
2246
+}
2247
+
2248
+/* Emits the `vhsubw.q.d vd, vj, vk` instruction. */
2249
+static void __attribute__((unused))
2250
+tcg_out_opc_vhsubw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2251
+{
2252
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_Q_D, vd, vj, vk));
2253
+}
2254
+
2255
+/* Emits the `vhaddw.hu.bu vd, vj, vk` instruction. */
2256
+static void __attribute__((unused))
2257
+tcg_out_opc_vhaddw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2258
+{
2259
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_HU_BU, vd, vj, vk));
2260
+}
2261
+
2262
+/* Emits the `vhaddw.wu.hu vd, vj, vk` instruction. */
2263
+static void __attribute__((unused))
2264
+tcg_out_opc_vhaddw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2265
+{
2266
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_WU_HU, vd, vj, vk));
2267
+}
2268
+
2269
+/* Emits the `vhaddw.du.wu vd, vj, vk` instruction. */
2270
+static void __attribute__((unused))
2271
+tcg_out_opc_vhaddw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2272
+{
2273
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_DU_WU, vd, vj, vk));
2274
+}
2275
+
2276
+/* Emits the `vhaddw.qu.du vd, vj, vk` instruction. */
2277
+static void __attribute__((unused))
2278
+tcg_out_opc_vhaddw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2279
+{
2280
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_QU_DU, vd, vj, vk));
2281
+}
2282
+
2283
+/* Emits the `vhsubw.hu.bu vd, vj, vk` instruction. */
2284
+static void __attribute__((unused))
2285
+tcg_out_opc_vhsubw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2286
+{
2287
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_HU_BU, vd, vj, vk));
2288
+}
2289
+
2290
+/* Emits the `vhsubw.wu.hu vd, vj, vk` instruction. */
2291
+static void __attribute__((unused))
2292
+tcg_out_opc_vhsubw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2293
+{
2294
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_WU_HU, vd, vj, vk));
2295
+}
2296
+
2297
+/* Emits the `vhsubw.du.wu vd, vj, vk` instruction. */
2298
+static void __attribute__((unused))
2299
+tcg_out_opc_vhsubw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2300
+{
2301
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_DU_WU, vd, vj, vk));
2302
+}
2303
+
2304
+/* Emits the `vhsubw.qu.du vd, vj, vk` instruction. */
2305
+static void __attribute__((unused))
2306
+tcg_out_opc_vhsubw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2307
+{
2308
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_QU_DU, vd, vj, vk));
2309
+}
2310
+
2311
+/* Emits the `vadda.b vd, vj, vk` instruction. */
2312
+static void __attribute__((unused))
2313
+tcg_out_opc_vadda_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2314
+{
2315
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_B, vd, vj, vk));
2316
+}
2317
+
2318
+/* Emits the `vadda.h vd, vj, vk` instruction. */
2319
+static void __attribute__((unused))
2320
+tcg_out_opc_vadda_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2321
+{
2322
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_H, vd, vj, vk));
2323
+}
2324
+
2325
+/* Emits the `vadda.w vd, vj, vk` instruction. */
2326
+static void __attribute__((unused))
2327
+tcg_out_opc_vadda_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2328
+{
2329
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_W, vd, vj, vk));
2330
+}
2331
+
2332
+/* Emits the `vadda.d vd, vj, vk` instruction. */
2333
+static void __attribute__((unused))
2334
+tcg_out_opc_vadda_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2335
+{
2336
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_D, vd, vj, vk));
2337
+}
2338
+
2339
+/* Emits the `vabsd.b vd, vj, vk` instruction. */
2340
+static void __attribute__((unused))
2341
+tcg_out_opc_vabsd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2342
+{
2343
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_B, vd, vj, vk));
2344
+}
2345
+
2346
+/* Emits the `vabsd.h vd, vj, vk` instruction. */
2347
+static void __attribute__((unused))
2348
+tcg_out_opc_vabsd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2349
+{
2350
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_H, vd, vj, vk));
2351
+}
2352
+
2353
+/* Emits the `vabsd.w vd, vj, vk` instruction. */
2354
+static void __attribute__((unused))
2355
+tcg_out_opc_vabsd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2356
+{
2357
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_W, vd, vj, vk));
2358
+}
2359
+
2360
+/* Emits the `vabsd.d vd, vj, vk` instruction. */
2361
+static void __attribute__((unused))
2362
+tcg_out_opc_vabsd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2363
+{
2364
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_D, vd, vj, vk));
2365
+}
2366
+
2367
+/* Emits the `vabsd.bu vd, vj, vk` instruction. */
2368
+static void __attribute__((unused))
2369
+tcg_out_opc_vabsd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2370
+{
2371
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_BU, vd, vj, vk));
2372
+}
2373
+
2374
+/* Emits the `vabsd.hu vd, vj, vk` instruction. */
2375
+static void __attribute__((unused))
2376
+tcg_out_opc_vabsd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2377
+{
2378
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_HU, vd, vj, vk));
2379
+}
2380
+
2381
+/* Emits the `vabsd.wu vd, vj, vk` instruction. */
2382
+static void __attribute__((unused))
2383
+tcg_out_opc_vabsd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2384
+{
2385
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_WU, vd, vj, vk));
2386
+}
2387
+
2388
+/* Emits the `vabsd.du vd, vj, vk` instruction. */
2389
+static void __attribute__((unused))
2390
+tcg_out_opc_vabsd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2391
+{
2392
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_DU, vd, vj, vk));
2393
+}
2394
+
2395
+/* Emits the `vavg.b vd, vj, vk` instruction. */
2396
+static void __attribute__((unused))
2397
+tcg_out_opc_vavg_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2398
+{
2399
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_B, vd, vj, vk));
2400
+}
2401
+
2402
+/* Emits the `vavg.h vd, vj, vk` instruction. */
2403
+static void __attribute__((unused))
2404
+tcg_out_opc_vavg_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2405
+{
2406
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_H, vd, vj, vk));
2407
+}
2408
+
2409
+/* Emits the `vavg.w vd, vj, vk` instruction. */
2410
+static void __attribute__((unused))
2411
+tcg_out_opc_vavg_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2412
+{
2413
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_W, vd, vj, vk));
2414
+}
2415
+
2416
+/* Emits the `vavg.d vd, vj, vk` instruction. */
2417
+static void __attribute__((unused))
2418
+tcg_out_opc_vavg_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2419
+{
2420
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_D, vd, vj, vk));
2421
+}
2422
+
2423
+/* Emits the `vavg.bu vd, vj, vk` instruction. */
2424
+static void __attribute__((unused))
2425
+tcg_out_opc_vavg_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2426
+{
2427
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_BU, vd, vj, vk));
2428
+}
2429
+
2430
+/* Emits the `vavg.hu vd, vj, vk` instruction. */
2431
+static void __attribute__((unused))
2432
+tcg_out_opc_vavg_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2433
+{
2434
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_HU, vd, vj, vk));
2435
+}
2436
+
2437
+/* Emits the `vavg.wu vd, vj, vk` instruction. */
2438
+static void __attribute__((unused))
2439
+tcg_out_opc_vavg_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2440
+{
2441
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_WU, vd, vj, vk));
2442
+}
2443
+
2444
+/* Emits the `vavg.du vd, vj, vk` instruction. */
2445
+static void __attribute__((unused))
2446
+tcg_out_opc_vavg_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2447
+{
2448
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_DU, vd, vj, vk));
2449
+}
2450
+
2451
+/* Emits the `vavgr.b vd, vj, vk` instruction. */
2452
+static void __attribute__((unused))
2453
+tcg_out_opc_vavgr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2454
+{
2455
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_B, vd, vj, vk));
2456
+}
2457
+
2458
+/* Emits the `vavgr.h vd, vj, vk` instruction. */
2459
+static void __attribute__((unused))
2460
+tcg_out_opc_vavgr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2461
+{
2462
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_H, vd, vj, vk));
2463
+}
2464
+
2465
+/* Emits the `vavgr.w vd, vj, vk` instruction. */
2466
+static void __attribute__((unused))
2467
+tcg_out_opc_vavgr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2468
+{
2469
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_W, vd, vj, vk));
2470
+}
2471
+
2472
+/* Emits the `vavgr.d vd, vj, vk` instruction. */
2473
+static void __attribute__((unused))
2474
+tcg_out_opc_vavgr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2475
+{
2476
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_D, vd, vj, vk));
2477
+}
2478
+
2479
+/* Emits the `vavgr.bu vd, vj, vk` instruction. */
2480
+static void __attribute__((unused))
2481
+tcg_out_opc_vavgr_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2482
+{
2483
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_BU, vd, vj, vk));
2484
+}
2485
+
2486
+/* Emits the `vavgr.hu vd, vj, vk` instruction. */
2487
+static void __attribute__((unused))
2488
+tcg_out_opc_vavgr_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2489
+{
2490
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_HU, vd, vj, vk));
2491
+}
2492
+
2493
+/* Emits the `vavgr.wu vd, vj, vk` instruction. */
2494
+static void __attribute__((unused))
2495
+tcg_out_opc_vavgr_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2496
+{
2497
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_WU, vd, vj, vk));
2498
+}
2499
+
2500
+/* Emits the `vavgr.du vd, vj, vk` instruction. */
2501
+static void __attribute__((unused))
2502
+tcg_out_opc_vavgr_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2503
+{
2504
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_DU, vd, vj, vk));
2505
+}
2506
+
2507
+/* Emits the `vmax.b vd, vj, vk` instruction. */
2508
+static void __attribute__((unused))
2509
+tcg_out_opc_vmax_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2510
+{
2511
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_B, vd, vj, vk));
2512
+}
2513
+
2514
+/* Emits the `vmax.h vd, vj, vk` instruction. */
2515
+static void __attribute__((unused))
2516
+tcg_out_opc_vmax_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2517
+{
2518
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_H, vd, vj, vk));
2519
+}
2520
+
2521
+/* Emits the `vmax.w vd, vj, vk` instruction. */
2522
+static void __attribute__((unused))
2523
+tcg_out_opc_vmax_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2524
+{
2525
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_W, vd, vj, vk));
2526
+}
2527
+
2528
+/* Emits the `vmax.d vd, vj, vk` instruction. */
2529
+static void __attribute__((unused))
2530
+tcg_out_opc_vmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2531
+{
2532
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_D, vd, vj, vk));
2533
+}
2534
+
2535
+/* Emits the `vmin.b vd, vj, vk` instruction. */
2536
+static void __attribute__((unused))
2537
+tcg_out_opc_vmin_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2538
+{
2539
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_B, vd, vj, vk));
2540
+}
2541
+
2542
+/* Emits the `vmin.h vd, vj, vk` instruction. */
2543
+static void __attribute__((unused))
2544
+tcg_out_opc_vmin_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2545
+{
2546
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_H, vd, vj, vk));
2547
+}
2548
+
2549
+/* Emits the `vmin.w vd, vj, vk` instruction. */
2550
+static void __attribute__((unused))
2551
+tcg_out_opc_vmin_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2552
+{
2553
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_W, vd, vj, vk));
2554
+}
2555
+
2556
+/* Emits the `vmin.d vd, vj, vk` instruction. */
2557
+static void __attribute__((unused))
2558
+tcg_out_opc_vmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2559
+{
2560
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_D, vd, vj, vk));
2561
+}
2562
+
2563
+/* Emits the `vmax.bu vd, vj, vk` instruction. */
2564
+static void __attribute__((unused))
2565
+tcg_out_opc_vmax_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2566
+{
2567
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_BU, vd, vj, vk));
2568
+}
2569
+
2570
+/* Emits the `vmax.hu vd, vj, vk` instruction. */
2571
+static void __attribute__((unused))
2572
+tcg_out_opc_vmax_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2573
+{
2574
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_HU, vd, vj, vk));
2575
+}
2576
+
2577
+/* Emits the `vmax.wu vd, vj, vk` instruction. */
2578
+static void __attribute__((unused))
2579
+tcg_out_opc_vmax_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2580
+{
2581
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_WU, vd, vj, vk));
2582
+}
2583
+
2584
+/* Emits the `vmax.du vd, vj, vk` instruction. */
2585
+static void __attribute__((unused))
2586
+tcg_out_opc_vmax_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2587
+{
2588
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_DU, vd, vj, vk));
2589
+}
2590
+
2591
+/* Emits the `vmin.bu vd, vj, vk` instruction. */
2592
+static void __attribute__((unused))
2593
+tcg_out_opc_vmin_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2594
+{
2595
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_BU, vd, vj, vk));
2596
+}
2597
+
2598
+/* Emits the `vmin.hu vd, vj, vk` instruction. */
2599
+static void __attribute__((unused))
2600
+tcg_out_opc_vmin_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2601
+{
2602
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_HU, vd, vj, vk));
2603
+}
2604
+
2605
+/* Emits the `vmin.wu vd, vj, vk` instruction. */
2606
+static void __attribute__((unused))
2607
+tcg_out_opc_vmin_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2608
+{
2609
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_WU, vd, vj, vk));
2610
+}
2611
+
2612
+/* Emits the `vmin.du vd, vj, vk` instruction. */
2613
+static void __attribute__((unused))
2614
+tcg_out_opc_vmin_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2615
+{
2616
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_DU, vd, vj, vk));
2617
+}
2618
+
2619
+/* Emits the `vmul.b vd, vj, vk` instruction. */
2620
+static void __attribute__((unused))
2621
+tcg_out_opc_vmul_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2622
+{
2623
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_B, vd, vj, vk));
2624
+}
2625
+
2626
+/* Emits the `vmul.h vd, vj, vk` instruction. */
2627
+static void __attribute__((unused))
2628
+tcg_out_opc_vmul_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2629
+{
2630
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_H, vd, vj, vk));
2631
+}
2632
+
2633
+/* Emits the `vmul.w vd, vj, vk` instruction. */
2634
+static void __attribute__((unused))
2635
+tcg_out_opc_vmul_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2636
+{
2637
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_W, vd, vj, vk));
2638
+}
2639
+
2640
+/* Emits the `vmul.d vd, vj, vk` instruction. */
2641
+static void __attribute__((unused))
2642
+tcg_out_opc_vmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2643
+{
2644
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_D, vd, vj, vk));
2645
+}
2646
+
2647
+/* Emits the `vmuh.b vd, vj, vk` instruction. */
2648
+static void __attribute__((unused))
2649
+tcg_out_opc_vmuh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2650
+{
2651
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_B, vd, vj, vk));
2652
+}
2653
+
2654
+/* Emits the `vmuh.h vd, vj, vk` instruction. */
2655
+static void __attribute__((unused))
2656
+tcg_out_opc_vmuh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2657
+{
2658
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_H, vd, vj, vk));
2659
+}
2660
+
2661
+/* Emits the `vmuh.w vd, vj, vk` instruction. */
2662
+static void __attribute__((unused))
2663
+tcg_out_opc_vmuh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2664
+{
2665
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_W, vd, vj, vk));
2666
+}
2667
+
2668
+/* Emits the `vmuh.d vd, vj, vk` instruction. */
2669
+static void __attribute__((unused))
2670
+tcg_out_opc_vmuh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2671
+{
2672
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_D, vd, vj, vk));
2673
+}
2674
+
2675
+/* Emits the `vmuh.bu vd, vj, vk` instruction. */
2676
+static void __attribute__((unused))
2677
+tcg_out_opc_vmuh_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2678
+{
2679
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_BU, vd, vj, vk));
2680
+}
2681
+
2682
+/* Emits the `vmuh.hu vd, vj, vk` instruction. */
2683
+static void __attribute__((unused))
2684
+tcg_out_opc_vmuh_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2685
+{
2686
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_HU, vd, vj, vk));
2687
+}
2688
+
2689
+/* Emits the `vmuh.wu vd, vj, vk` instruction. */
2690
+static void __attribute__((unused))
2691
+tcg_out_opc_vmuh_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2692
+{
2693
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_WU, vd, vj, vk));
2694
+}
2695
+
2696
+/* Emits the `vmuh.du vd, vj, vk` instruction. */
2697
+static void __attribute__((unused))
2698
+tcg_out_opc_vmuh_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2699
+{
2700
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_DU, vd, vj, vk));
2701
+}
2702
+
2703
+/* Emits the `vmulwev.h.b vd, vj, vk` instruction. */
2704
+static void __attribute__((unused))
2705
+tcg_out_opc_vmulwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2706
+{
2707
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_B, vd, vj, vk));
2708
+}
2709
+
2710
+/* Emits the `vmulwev.w.h vd, vj, vk` instruction. */
2711
+static void __attribute__((unused))
2712
+tcg_out_opc_vmulwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2713
+{
2714
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_H, vd, vj, vk));
2715
+}
2716
+
2717
+/* Emits the `vmulwev.d.w vd, vj, vk` instruction. */
2718
+static void __attribute__((unused))
2719
+tcg_out_opc_vmulwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2720
+{
2721
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_W, vd, vj, vk));
2722
+}
2723
+
2724
+/* Emits the `vmulwev.q.d vd, vj, vk` instruction. */
2725
+static void __attribute__((unused))
2726
+tcg_out_opc_vmulwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2727
+{
2728
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_D, vd, vj, vk));
2729
+}
2730
+
2731
+/* Emits the `vmulwod.h.b vd, vj, vk` instruction. */
2732
+static void __attribute__((unused))
2733
+tcg_out_opc_vmulwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2734
+{
2735
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_B, vd, vj, vk));
2736
+}
2737
+
2738
+/* Emits the `vmulwod.w.h vd, vj, vk` instruction. */
2739
+static void __attribute__((unused))
2740
+tcg_out_opc_vmulwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2741
+{
2742
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_H, vd, vj, vk));
2743
+}
2744
+
2745
+/* Emits the `vmulwod.d.w vd, vj, vk` instruction. */
2746
+static void __attribute__((unused))
2747
+tcg_out_opc_vmulwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2748
+{
2749
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_W, vd, vj, vk));
2750
+}
2751
+
2752
+/* Emits the `vmulwod.q.d vd, vj, vk` instruction. */
2753
+static void __attribute__((unused))
2754
+tcg_out_opc_vmulwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2755
+{
2756
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_D, vd, vj, vk));
2757
+}
2758
+
2759
+/* Emits the `vmulwev.h.bu vd, vj, vk` instruction. */
2760
+static void __attribute__((unused))
2761
+tcg_out_opc_vmulwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2762
+{
2763
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU, vd, vj, vk));
2764
+}
2765
+
2766
+/* Emits the `vmulwev.w.hu vd, vj, vk` instruction. */
2767
+static void __attribute__((unused))
2768
+tcg_out_opc_vmulwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2769
+{
2770
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU, vd, vj, vk));
2771
+}
2772
+
2773
+/* Emits the `vmulwev.d.wu vd, vj, vk` instruction. */
2774
+static void __attribute__((unused))
2775
+tcg_out_opc_vmulwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2776
+{
2777
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU, vd, vj, vk));
2778
+}
2779
+
2780
+/* Emits the `vmulwev.q.du vd, vj, vk` instruction. */
2781
+static void __attribute__((unused))
2782
+tcg_out_opc_vmulwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2783
+{
2784
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU, vd, vj, vk));
2785
+}
2786
+
2787
+/* Emits the `vmulwod.h.bu vd, vj, vk` instruction. */
2788
+static void __attribute__((unused))
2789
+tcg_out_opc_vmulwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2790
+{
2791
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU, vd, vj, vk));
2792
+}
2793
+
2794
+/* Emits the `vmulwod.w.hu vd, vj, vk` instruction. */
2795
+static void __attribute__((unused))
2796
+tcg_out_opc_vmulwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2797
+{
2798
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU, vd, vj, vk));
2799
+}
2800
+
2801
+/* Emits the `vmulwod.d.wu vd, vj, vk` instruction. */
2802
+static void __attribute__((unused))
2803
+tcg_out_opc_vmulwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2804
+{
2805
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU, vd, vj, vk));
2806
+}
2807
+
2808
+/* Emits the `vmulwod.q.du vd, vj, vk` instruction. */
2809
+static void __attribute__((unused))
2810
+tcg_out_opc_vmulwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2811
+{
2812
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU, vd, vj, vk));
2813
+}
2814
+
2815
+/* Emits the `vmulwev.h.bu.b vd, vj, vk` instruction. */
2816
+static void __attribute__((unused))
2817
+tcg_out_opc_vmulwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2818
+{
2819
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU_B, vd, vj, vk));
2820
+}
2821
+
2822
+/* Emits the `vmulwev.w.hu.h vd, vj, vk` instruction. */
2823
+static void __attribute__((unused))
2824
+tcg_out_opc_vmulwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2825
+{
2826
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU_H, vd, vj, vk));
2827
+}
2828
+
2829
+/* Emits the `vmulwev.d.wu.w vd, vj, vk` instruction. */
2830
+static void __attribute__((unused))
2831
+tcg_out_opc_vmulwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2832
+{
2833
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU_W, vd, vj, vk));
2834
+}
2835
+
2836
+/* Emits the `vmulwev.q.du.d vd, vj, vk` instruction. */
2837
+static void __attribute__((unused))
2838
+tcg_out_opc_vmulwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2839
+{
2840
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU_D, vd, vj, vk));
2841
+}
2842
+
2843
+/* Emits the `vmulwod.h.bu.b vd, vj, vk` instruction. */
2844
+static void __attribute__((unused))
2845
+tcg_out_opc_vmulwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2846
+{
2847
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU_B, vd, vj, vk));
2848
+}
2849
+
2850
+/* Emits the `vmulwod.w.hu.h vd, vj, vk` instruction. */
2851
+static void __attribute__((unused))
2852
+tcg_out_opc_vmulwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2853
+{
2854
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU_H, vd, vj, vk));
2855
+}
2856
+
2857
+/* Emits the `vmulwod.d.wu.w vd, vj, vk` instruction. */
2858
+static void __attribute__((unused))
2859
+tcg_out_opc_vmulwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2860
+{
2861
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU_W, vd, vj, vk));
2862
+}
2863
+
2864
+/* Emits the `vmulwod.q.du.d vd, vj, vk` instruction. */
2865
+static void __attribute__((unused))
2866
+tcg_out_opc_vmulwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2867
+{
2868
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU_D, vd, vj, vk));
2869
+}
2870
+
2871
+/* Emits the `vmadd.b vd, vj, vk` instruction. */
2872
+static void __attribute__((unused))
2873
+tcg_out_opc_vmadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2874
+{
2875
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_B, vd, vj, vk));
2876
+}
2877
+
2878
+/* Emits the `vmadd.h vd, vj, vk` instruction. */
2879
+static void __attribute__((unused))
2880
+tcg_out_opc_vmadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2881
+{
2882
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_H, vd, vj, vk));
2883
+}
2884
+
2885
+/* Emits the `vmadd.w vd, vj, vk` instruction. */
2886
+static void __attribute__((unused))
2887
+tcg_out_opc_vmadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2888
+{
2889
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_W, vd, vj, vk));
2890
+}
2891
+
2892
+/* Emits the `vmadd.d vd, vj, vk` instruction. */
2893
+static void __attribute__((unused))
2894
+tcg_out_opc_vmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2895
+{
2896
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_D, vd, vj, vk));
2897
+}
2898
+
2899
+/* Emits the `vmsub.b vd, vj, vk` instruction. */
2900
+static void __attribute__((unused))
2901
+tcg_out_opc_vmsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2902
+{
2903
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_B, vd, vj, vk));
2904
+}
2905
+
2906
+/* Emits the `vmsub.h vd, vj, vk` instruction. */
2907
+static void __attribute__((unused))
2908
+tcg_out_opc_vmsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2909
+{
2910
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_H, vd, vj, vk));
2911
+}
2912
+
2913
+/* Emits the `vmsub.w vd, vj, vk` instruction. */
2914
+static void __attribute__((unused))
2915
+tcg_out_opc_vmsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2916
+{
2917
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_W, vd, vj, vk));
2918
+}
2919
+
2920
+/* Emits the `vmsub.d vd, vj, vk` instruction. */
2921
+static void __attribute__((unused))
2922
+tcg_out_opc_vmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2923
+{
2924
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_D, vd, vj, vk));
2925
+}
2926
+
2927
+/* Emits the `vmaddwev.h.b vd, vj, vk` instruction. */
2928
+static void __attribute__((unused))
2929
+tcg_out_opc_vmaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2930
+{
2931
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_B, vd, vj, vk));
2932
+}
2933
+
2934
+/* Emits the `vmaddwev.w.h vd, vj, vk` instruction. */
2935
+static void __attribute__((unused))
2936
+tcg_out_opc_vmaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2937
+{
2938
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_H, vd, vj, vk));
2939
+}
2940
+
2941
+/* Emits the `vmaddwev.d.w vd, vj, vk` instruction. */
2942
+static void __attribute__((unused))
2943
+tcg_out_opc_vmaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2944
+{
2945
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_W, vd, vj, vk));
2946
+}
2947
+
2948
+/* Emits the `vmaddwev.q.d vd, vj, vk` instruction. */
2949
+static void __attribute__((unused))
2950
+tcg_out_opc_vmaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2951
+{
2952
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_D, vd, vj, vk));
2953
+}
2954
+
2955
+/* Emits the `vmaddwod.h.b vd, vj, vk` instruction. */
2956
+static void __attribute__((unused))
2957
+tcg_out_opc_vmaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2958
+{
2959
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_B, vd, vj, vk));
2960
+}
2961
+
2962
+/* Emits the `vmaddwod.w.h vd, vj, vk` instruction. */
2963
+static void __attribute__((unused))
2964
+tcg_out_opc_vmaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2965
+{
2966
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_H, vd, vj, vk));
2967
+}
2968
+
2969
+/* Emits the `vmaddwod.d.w vd, vj, vk` instruction. */
2970
+static void __attribute__((unused))
2971
+tcg_out_opc_vmaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2972
+{
2973
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_W, vd, vj, vk));
2974
+}
2975
+
2976
+/* Emits the `vmaddwod.q.d vd, vj, vk` instruction. */
2977
+static void __attribute__((unused))
2978
+tcg_out_opc_vmaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2979
+{
2980
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_D, vd, vj, vk));
2981
+}
2982
+
2983
+/* Emits the `vmaddwev.h.bu vd, vj, vk` instruction. */
2984
+static void __attribute__((unused))
2985
+tcg_out_opc_vmaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2986
+{
2987
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU, vd, vj, vk));
2988
+}
2989
+
2990
+/* Emits the `vmaddwev.w.hu vd, vj, vk` instruction. */
2991
+static void __attribute__((unused))
2992
+tcg_out_opc_vmaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
2993
+{
2994
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU, vd, vj, vk));
2995
+}
2996
+
2997
+/* Emits the `vmaddwev.d.wu vd, vj, vk` instruction. */
2998
+static void __attribute__((unused))
2999
+tcg_out_opc_vmaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3000
+{
3001
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU, vd, vj, vk));
3002
+}
3003
+
3004
+/* Emits the `vmaddwev.q.du vd, vj, vk` instruction. */
3005
+static void __attribute__((unused))
3006
+tcg_out_opc_vmaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3007
+{
3008
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU, vd, vj, vk));
3009
+}
3010
+
3011
+/* Emits the `vmaddwod.h.bu vd, vj, vk` instruction. */
3012
+static void __attribute__((unused))
3013
+tcg_out_opc_vmaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3014
+{
3015
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU, vd, vj, vk));
3016
+}
3017
+
3018
+/* Emits the `vmaddwod.w.hu vd, vj, vk` instruction. */
3019
+static void __attribute__((unused))
3020
+tcg_out_opc_vmaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3021
+{
3022
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU, vd, vj, vk));
3023
+}
3024
+
3025
+/* Emits the `vmaddwod.d.wu vd, vj, vk` instruction. */
3026
+static void __attribute__((unused))
3027
+tcg_out_opc_vmaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3028
+{
3029
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU, vd, vj, vk));
3030
+}
3031
+
3032
+/* Emits the `vmaddwod.q.du vd, vj, vk` instruction. */
3033
+static void __attribute__((unused))
3034
+tcg_out_opc_vmaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3035
+{
3036
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU, vd, vj, vk));
3037
+}
3038
+
3039
+/* Emits the `vmaddwev.h.bu.b vd, vj, vk` instruction. */
3040
+static void __attribute__((unused))
3041
+tcg_out_opc_vmaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3042
+{
3043
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU_B, vd, vj, vk));
3044
+}
3045
+
3046
+/* Emits the `vmaddwev.w.hu.h vd, vj, vk` instruction. */
3047
+static void __attribute__((unused))
3048
+tcg_out_opc_vmaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3049
+{
3050
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU_H, vd, vj, vk));
3051
+}
3052
+
3053
+/* Emits the `vmaddwev.d.wu.w vd, vj, vk` instruction. */
3054
+static void __attribute__((unused))
3055
+tcg_out_opc_vmaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3056
+{
3057
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU_W, vd, vj, vk));
3058
+}
3059
+
3060
+/* Emits the `vmaddwev.q.du.d vd, vj, vk` instruction. */
3061
+static void __attribute__((unused))
3062
+tcg_out_opc_vmaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3063
+{
3064
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU_D, vd, vj, vk));
3065
+}
3066
+
3067
+/* Emits the `vmaddwod.h.bu.b vd, vj, vk` instruction. */
3068
+static void __attribute__((unused))
3069
+tcg_out_opc_vmaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3070
+{
3071
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU_B, vd, vj, vk));
3072
+}
3073
+
3074
+/* Emits the `vmaddwod.w.hu.h vd, vj, vk` instruction. */
3075
+static void __attribute__((unused))
3076
+tcg_out_opc_vmaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3077
+{
3078
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU_H, vd, vj, vk));
3079
+}
3080
+
3081
+/* Emits the `vmaddwod.d.wu.w vd, vj, vk` instruction. */
3082
+static void __attribute__((unused))
3083
+tcg_out_opc_vmaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3084
+{
3085
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU_W, vd, vj, vk));
3086
+}
3087
+
3088
+/* Emits the `vmaddwod.q.du.d vd, vj, vk` instruction. */
3089
+static void __attribute__((unused))
3090
+tcg_out_opc_vmaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3091
+{
3092
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU_D, vd, vj, vk));
3093
+}
3094
+
3095
+/* Emits the `vdiv.b vd, vj, vk` instruction. */
3096
+static void __attribute__((unused))
3097
+tcg_out_opc_vdiv_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3098
+{
3099
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_B, vd, vj, vk));
3100
+}
3101
+
3102
+/* Emits the `vdiv.h vd, vj, vk` instruction. */
3103
+static void __attribute__((unused))
3104
+tcg_out_opc_vdiv_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3105
+{
3106
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_H, vd, vj, vk));
3107
+}
3108
+
3109
+/* Emits the `vdiv.w vd, vj, vk` instruction. */
3110
+static void __attribute__((unused))
3111
+tcg_out_opc_vdiv_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3112
+{
3113
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_W, vd, vj, vk));
3114
+}
3115
+
3116
+/* Emits the `vdiv.d vd, vj, vk` instruction. */
3117
+static void __attribute__((unused))
3118
+tcg_out_opc_vdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3119
+{
3120
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_D, vd, vj, vk));
3121
+}
3122
+
3123
+/* Emits the `vmod.b vd, vj, vk` instruction. */
3124
+static void __attribute__((unused))
3125
+tcg_out_opc_vmod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3126
+{
3127
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_B, vd, vj, vk));
3128
+}
3129
+
3130
+/* Emits the `vmod.h vd, vj, vk` instruction. */
3131
+static void __attribute__((unused))
3132
+tcg_out_opc_vmod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3133
+{
3134
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_H, vd, vj, vk));
3135
+}
3136
+
3137
+/* Emits the `vmod.w vd, vj, vk` instruction. */
3138
+static void __attribute__((unused))
3139
+tcg_out_opc_vmod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3140
+{
3141
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_W, vd, vj, vk));
3142
+}
3143
+
3144
+/* Emits the `vmod.d vd, vj, vk` instruction. */
3145
+static void __attribute__((unused))
3146
+tcg_out_opc_vmod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3147
+{
3148
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_D, vd, vj, vk));
3149
+}
3150
+
3151
+/* Emits the `vdiv.bu vd, vj, vk` instruction. */
3152
+static void __attribute__((unused))
3153
+tcg_out_opc_vdiv_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3154
+{
3155
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_BU, vd, vj, vk));
3156
+}
3157
+
3158
+/* Emits the `vdiv.hu vd, vj, vk` instruction. */
3159
+static void __attribute__((unused))
3160
+tcg_out_opc_vdiv_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3161
+{
3162
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_HU, vd, vj, vk));
3163
+}
3164
+
3165
+/* Emits the `vdiv.wu vd, vj, vk` instruction. */
3166
+static void __attribute__((unused))
3167
+tcg_out_opc_vdiv_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3168
+{
3169
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_WU, vd, vj, vk));
3170
+}
3171
+
3172
+/* Emits the `vdiv.du vd, vj, vk` instruction. */
3173
+static void __attribute__((unused))
3174
+tcg_out_opc_vdiv_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3175
+{
3176
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_DU, vd, vj, vk));
3177
+}
3178
+
3179
+/* Emits the `vmod.bu vd, vj, vk` instruction. */
3180
+static void __attribute__((unused))
3181
+tcg_out_opc_vmod_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3182
+{
3183
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_BU, vd, vj, vk));
3184
+}
3185
+
3186
+/* Emits the `vmod.hu vd, vj, vk` instruction. */
3187
+static void __attribute__((unused))
3188
+tcg_out_opc_vmod_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3189
+{
3190
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_HU, vd, vj, vk));
3191
+}
3192
+
3193
+/* Emits the `vmod.wu vd, vj, vk` instruction. */
3194
+static void __attribute__((unused))
3195
+tcg_out_opc_vmod_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3196
+{
3197
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_WU, vd, vj, vk));
3198
+}
3199
+
3200
+/* Emits the `vmod.du vd, vj, vk` instruction. */
3201
+static void __attribute__((unused))
3202
+tcg_out_opc_vmod_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3203
+{
3204
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_DU, vd, vj, vk));
3205
+}
3206
+
3207
+/* Emits the `vsll.b vd, vj, vk` instruction. */
3208
+static void __attribute__((unused))
3209
+tcg_out_opc_vsll_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3210
+{
3211
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_B, vd, vj, vk));
3212
+}
3213
+
3214
+/* Emits the `vsll.h vd, vj, vk` instruction. */
3215
+static void __attribute__((unused))
3216
+tcg_out_opc_vsll_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3217
+{
3218
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_H, vd, vj, vk));
3219
+}
3220
+
3221
+/* Emits the `vsll.w vd, vj, vk` instruction. */
3222
+static void __attribute__((unused))
3223
+tcg_out_opc_vsll_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3224
+{
3225
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_W, vd, vj, vk));
3226
+}
3227
+
3228
+/* Emits the `vsll.d vd, vj, vk` instruction. */
3229
+static void __attribute__((unused))
3230
+tcg_out_opc_vsll_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3231
+{
3232
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_D, vd, vj, vk));
3233
+}
3234
+
3235
+/* Emits the `vsrl.b vd, vj, vk` instruction. */
3236
+static void __attribute__((unused))
3237
+tcg_out_opc_vsrl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3238
+{
3239
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_B, vd, vj, vk));
3240
+}
3241
+
3242
+/* Emits the `vsrl.h vd, vj, vk` instruction. */
3243
+static void __attribute__((unused))
3244
+tcg_out_opc_vsrl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3245
+{
3246
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_H, vd, vj, vk));
3247
+}
3248
+
3249
+/* Emits the `vsrl.w vd, vj, vk` instruction. */
3250
+static void __attribute__((unused))
3251
+tcg_out_opc_vsrl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3252
+{
3253
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_W, vd, vj, vk));
3254
+}
3255
+
3256
+/* Emits the `vsrl.d vd, vj, vk` instruction. */
3257
+static void __attribute__((unused))
3258
+tcg_out_opc_vsrl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3259
+{
3260
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_D, vd, vj, vk));
3261
+}
3262
+
3263
+/* Emits the `vsra.b vd, vj, vk` instruction. */
3264
+static void __attribute__((unused))
3265
+tcg_out_opc_vsra_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3266
+{
3267
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_B, vd, vj, vk));
3268
+}
3269
+
3270
+/* Emits the `vsra.h vd, vj, vk` instruction. */
3271
+static void __attribute__((unused))
3272
+tcg_out_opc_vsra_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3273
+{
3274
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_H, vd, vj, vk));
3275
+}
3276
+
3277
+/* Emits the `vsra.w vd, vj, vk` instruction. */
3278
+static void __attribute__((unused))
3279
+tcg_out_opc_vsra_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3280
+{
3281
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_W, vd, vj, vk));
3282
+}
3283
+
3284
+/* Emits the `vsra.d vd, vj, vk` instruction. */
3285
+static void __attribute__((unused))
3286
+tcg_out_opc_vsra_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3287
+{
3288
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_D, vd, vj, vk));
3289
+}
3290
+
3291
+/* Emits the `vrotr.b vd, vj, vk` instruction. */
3292
+static void __attribute__((unused))
3293
+tcg_out_opc_vrotr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3294
+{
3295
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_B, vd, vj, vk));
3296
+}
3297
+
3298
+/* Emits the `vrotr.h vd, vj, vk` instruction. */
3299
+static void __attribute__((unused))
3300
+tcg_out_opc_vrotr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3301
+{
3302
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_H, vd, vj, vk));
3303
+}
3304
+
3305
+/* Emits the `vrotr.w vd, vj, vk` instruction. */
3306
+static void __attribute__((unused))
3307
+tcg_out_opc_vrotr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3308
+{
3309
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_W, vd, vj, vk));
3310
+}
3311
+
3312
+/* Emits the `vrotr.d vd, vj, vk` instruction. */
3313
+static void __attribute__((unused))
3314
+tcg_out_opc_vrotr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3315
+{
3316
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_D, vd, vj, vk));
3317
+}
3318
+
3319
+/* Emits the `vsrlr.b vd, vj, vk` instruction. */
3320
+static void __attribute__((unused))
3321
+tcg_out_opc_vsrlr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3322
+{
3323
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_B, vd, vj, vk));
3324
+}
3325
+
3326
+/* Emits the `vsrlr.h vd, vj, vk` instruction. */
3327
+static void __attribute__((unused))
3328
+tcg_out_opc_vsrlr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3329
+{
3330
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_H, vd, vj, vk));
3331
+}
3332
+
3333
+/* Emits the `vsrlr.w vd, vj, vk` instruction. */
3334
+static void __attribute__((unused))
3335
+tcg_out_opc_vsrlr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3336
+{
3337
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_W, vd, vj, vk));
3338
+}
3339
+
3340
+/* Emits the `vsrlr.d vd, vj, vk` instruction. */
3341
+static void __attribute__((unused))
3342
+tcg_out_opc_vsrlr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3343
+{
3344
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_D, vd, vj, vk));
3345
+}
3346
+
3347
+/* Emits the `vsrar.b vd, vj, vk` instruction. */
3348
+static void __attribute__((unused))
3349
+tcg_out_opc_vsrar_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3350
+{
3351
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_B, vd, vj, vk));
3352
+}
3353
+
3354
+/* Emits the `vsrar.h vd, vj, vk` instruction. */
3355
+static void __attribute__((unused))
3356
+tcg_out_opc_vsrar_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3357
+{
3358
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_H, vd, vj, vk));
3359
+}
3360
+
3361
+/* Emits the `vsrar.w vd, vj, vk` instruction. */
3362
+static void __attribute__((unused))
3363
+tcg_out_opc_vsrar_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3364
+{
3365
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_W, vd, vj, vk));
3366
+}
3367
+
3368
+/* Emits the `vsrar.d vd, vj, vk` instruction. */
3369
+static void __attribute__((unused))
3370
+tcg_out_opc_vsrar_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3371
+{
3372
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_D, vd, vj, vk));
3373
+}
3374
+
3375
+/* Emits the `vsrln.b.h vd, vj, vk` instruction. */
3376
+static void __attribute__((unused))
3377
+tcg_out_opc_vsrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3378
+{
3379
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_B_H, vd, vj, vk));
3380
+}
3381
+
3382
+/* Emits the `vsrln.h.w vd, vj, vk` instruction. */
3383
+static void __attribute__((unused))
3384
+tcg_out_opc_vsrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3385
+{
3386
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_H_W, vd, vj, vk));
3387
+}
3388
+
3389
+/* Emits the `vsrln.w.d vd, vj, vk` instruction. */
3390
+static void __attribute__((unused))
3391
+tcg_out_opc_vsrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3392
+{
3393
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_W_D, vd, vj, vk));
3394
+}
3395
+
3396
+/* Emits the `vsran.b.h vd, vj, vk` instruction. */
3397
+static void __attribute__((unused))
3398
+tcg_out_opc_vsran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3399
+{
3400
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_B_H, vd, vj, vk));
3401
+}
3402
+
3403
+/* Emits the `vsran.h.w vd, vj, vk` instruction. */
3404
+static void __attribute__((unused))
3405
+tcg_out_opc_vsran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3406
+{
3407
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_H_W, vd, vj, vk));
3408
+}
3409
+
3410
+/* Emits the `vsran.w.d vd, vj, vk` instruction. */
3411
+static void __attribute__((unused))
3412
+tcg_out_opc_vsran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3413
+{
3414
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_W_D, vd, vj, vk));
3415
+}
3416
+
3417
+/* Emits the `vsrlrn.b.h vd, vj, vk` instruction. */
3418
+static void __attribute__((unused))
3419
+tcg_out_opc_vsrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3420
+{
3421
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_B_H, vd, vj, vk));
3422
+}
3423
+
3424
+/* Emits the `vsrlrn.h.w vd, vj, vk` instruction. */
3425
+static void __attribute__((unused))
3426
+tcg_out_opc_vsrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3427
+{
3428
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_H_W, vd, vj, vk));
3429
+}
3430
+
3431
+/* Emits the `vsrlrn.w.d vd, vj, vk` instruction. */
3432
+static void __attribute__((unused))
3433
+tcg_out_opc_vsrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3434
+{
3435
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_W_D, vd, vj, vk));
3436
+}
3437
+
3438
+/* Emits the `vsrarn.b.h vd, vj, vk` instruction. */
3439
+static void __attribute__((unused))
3440
+tcg_out_opc_vsrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3441
+{
3442
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_B_H, vd, vj, vk));
3443
+}
3444
+
3445
+/* Emits the `vsrarn.h.w vd, vj, vk` instruction. */
3446
+static void __attribute__((unused))
3447
+tcg_out_opc_vsrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3448
+{
3449
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_H_W, vd, vj, vk));
3450
+}
3451
+
3452
+/* Emits the `vsrarn.w.d vd, vj, vk` instruction. */
3453
+static void __attribute__((unused))
3454
+tcg_out_opc_vsrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3455
+{
3456
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_W_D, vd, vj, vk));
3457
+}
3458
+
3459
+/* Emits the `vssrln.b.h vd, vj, vk` instruction. */
3460
+static void __attribute__((unused))
3461
+tcg_out_opc_vssrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3462
+{
3463
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_B_H, vd, vj, vk));
3464
+}
3465
+
3466
+/* Emits the `vssrln.h.w vd, vj, vk` instruction. */
3467
+static void __attribute__((unused))
3468
+tcg_out_opc_vssrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3469
+{
3470
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_H_W, vd, vj, vk));
3471
+}
3472
+
3473
+/* Emits the `vssrln.w.d vd, vj, vk` instruction. */
3474
+static void __attribute__((unused))
3475
+tcg_out_opc_vssrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3476
+{
3477
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_W_D, vd, vj, vk));
3478
+}
3479
+
3480
+/* Emits the `vssran.b.h vd, vj, vk` instruction. */
3481
+static void __attribute__((unused))
3482
+tcg_out_opc_vssran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3483
+{
3484
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_B_H, vd, vj, vk));
3485
+}
3486
+
3487
+/* Emits the `vssran.h.w vd, vj, vk` instruction. */
3488
+static void __attribute__((unused))
3489
+tcg_out_opc_vssran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3490
+{
3491
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_H_W, vd, vj, vk));
3492
+}
3493
+
3494
+/* Emits the `vssran.w.d vd, vj, vk` instruction. */
3495
+static void __attribute__((unused))
3496
+tcg_out_opc_vssran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3497
+{
3498
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_W_D, vd, vj, vk));
3499
+}
3500
+
3501
+/* Emits the `vssrlrn.b.h vd, vj, vk` instruction. */
3502
+static void __attribute__((unused))
3503
+tcg_out_opc_vssrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3504
+{
3505
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_B_H, vd, vj, vk));
3506
+}
3507
+
3508
+/* Emits the `vssrlrn.h.w vd, vj, vk` instruction. */
3509
+static void __attribute__((unused))
3510
+tcg_out_opc_vssrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3511
+{
3512
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_H_W, vd, vj, vk));
3513
+}
3514
+
3515
+/* Emits the `vssrlrn.w.d vd, vj, vk` instruction. */
3516
+static void __attribute__((unused))
3517
+tcg_out_opc_vssrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3518
+{
3519
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_W_D, vd, vj, vk));
3520
+}
3521
+
3522
+/* Emits the `vssrarn.b.h vd, vj, vk` instruction. */
3523
+static void __attribute__((unused))
3524
+tcg_out_opc_vssrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3525
+{
3526
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_B_H, vd, vj, vk));
3527
+}
3528
+
3529
+/* Emits the `vssrarn.h.w vd, vj, vk` instruction. */
3530
+static void __attribute__((unused))
3531
+tcg_out_opc_vssrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3532
+{
3533
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_H_W, vd, vj, vk));
3534
+}
3535
+
3536
+/* Emits the `vssrarn.w.d vd, vj, vk` instruction. */
3537
+static void __attribute__((unused))
3538
+tcg_out_opc_vssrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3539
+{
3540
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_W_D, vd, vj, vk));
3541
+}
3542
+
3543
+/* Emits the `vssrln.bu.h vd, vj, vk` instruction. */
3544
+static void __attribute__((unused))
3545
+tcg_out_opc_vssrln_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3546
+{
3547
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_BU_H, vd, vj, vk));
3548
+}
3549
+
3550
+/* Emits the `vssrln.hu.w vd, vj, vk` instruction. */
3551
+static void __attribute__((unused))
3552
+tcg_out_opc_vssrln_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3553
+{
3554
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_HU_W, vd, vj, vk));
3555
+}
3556
+
3557
+/* Emits the `vssrln.wu.d vd, vj, vk` instruction. */
3558
+static void __attribute__((unused))
3559
+tcg_out_opc_vssrln_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3560
+{
3561
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_WU_D, vd, vj, vk));
3562
+}
3563
+
3564
+/* Emits the `vssran.bu.h vd, vj, vk` instruction. */
3565
+static void __attribute__((unused))
3566
+tcg_out_opc_vssran_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3567
+{
3568
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_BU_H, vd, vj, vk));
3569
+}
3570
+
3571
+/* Emits the `vssran.hu.w vd, vj, vk` instruction. */
3572
+static void __attribute__((unused))
3573
+tcg_out_opc_vssran_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3574
+{
3575
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_HU_W, vd, vj, vk));
3576
+}
3577
+
3578
+/* Emits the `vssran.wu.d vd, vj, vk` instruction. */
3579
+static void __attribute__((unused))
3580
+tcg_out_opc_vssran_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3581
+{
3582
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_WU_D, vd, vj, vk));
3583
+}
3584
+
3585
+/* Emits the `vssrlrn.bu.h vd, vj, vk` instruction. */
3586
+static void __attribute__((unused))
3587
+tcg_out_opc_vssrlrn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3588
+{
3589
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_BU_H, vd, vj, vk));
3590
+}
3591
+
3592
+/* Emits the `vssrlrn.hu.w vd, vj, vk` instruction. */
3593
+static void __attribute__((unused))
3594
+tcg_out_opc_vssrlrn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3595
+{
3596
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_HU_W, vd, vj, vk));
3597
+}
3598
+
3599
+/* Emits the `vssrlrn.wu.d vd, vj, vk` instruction. */
3600
+static void __attribute__((unused))
3601
+tcg_out_opc_vssrlrn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3602
+{
3603
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_WU_D, vd, vj, vk));
3604
+}
3605
+
3606
+/* Emits the `vssrarn.bu.h vd, vj, vk` instruction. */
3607
+static void __attribute__((unused))
3608
+tcg_out_opc_vssrarn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3609
+{
3610
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_BU_H, vd, vj, vk));
3611
+}
3612
+
3613
+/* Emits the `vssrarn.hu.w vd, vj, vk` instruction. */
3614
+static void __attribute__((unused))
3615
+tcg_out_opc_vssrarn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3616
+{
3617
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_HU_W, vd, vj, vk));
3618
+}
3619
+
3620
+/* Emits the `vssrarn.wu.d vd, vj, vk` instruction. */
3621
+static void __attribute__((unused))
3622
+tcg_out_opc_vssrarn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3623
+{
3624
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_WU_D, vd, vj, vk));
3625
+}
3626
+
3627
+/* Emits the `vbitclr.b vd, vj, vk` instruction. */
3628
+static void __attribute__((unused))
3629
+tcg_out_opc_vbitclr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3630
+{
3631
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_B, vd, vj, vk));
3632
+}
3633
+
3634
+/* Emits the `vbitclr.h vd, vj, vk` instruction. */
3635
+static void __attribute__((unused))
3636
+tcg_out_opc_vbitclr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3637
+{
3638
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_H, vd, vj, vk));
3639
+}
3640
+
3641
+/* Emits the `vbitclr.w vd, vj, vk` instruction. */
3642
+static void __attribute__((unused))
3643
+tcg_out_opc_vbitclr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3644
+{
3645
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_W, vd, vj, vk));
3646
+}
3647
+
3648
+/* Emits the `vbitclr.d vd, vj, vk` instruction. */
3649
+static void __attribute__((unused))
3650
+tcg_out_opc_vbitclr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3651
+{
3652
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_D, vd, vj, vk));
3653
+}
3654
+
3655
+/* Emits the `vbitset.b vd, vj, vk` instruction. */
3656
+static void __attribute__((unused))
3657
+tcg_out_opc_vbitset_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3658
+{
3659
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_B, vd, vj, vk));
3660
+}
3661
+
3662
+/* Emits the `vbitset.h vd, vj, vk` instruction. */
3663
+static void __attribute__((unused))
3664
+tcg_out_opc_vbitset_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3665
+{
3666
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_H, vd, vj, vk));
3667
+}
3668
+
3669
+/* Emits the `vbitset.w vd, vj, vk` instruction. */
3670
+static void __attribute__((unused))
3671
+tcg_out_opc_vbitset_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3672
+{
3673
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_W, vd, vj, vk));
3674
+}
3675
+
3676
+/* Emits the `vbitset.d vd, vj, vk` instruction. */
3677
+static void __attribute__((unused))
3678
+tcg_out_opc_vbitset_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3679
+{
3680
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_D, vd, vj, vk));
3681
+}
3682
+
3683
+/* Emits the `vbitrev.b vd, vj, vk` instruction. */
3684
+static void __attribute__((unused))
3685
+tcg_out_opc_vbitrev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3686
+{
3687
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_B, vd, vj, vk));
3688
+}
3689
+
3690
+/* Emits the `vbitrev.h vd, vj, vk` instruction. */
3691
+static void __attribute__((unused))
3692
+tcg_out_opc_vbitrev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3693
+{
3694
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_H, vd, vj, vk));
3695
+}
3696
+
3697
+/* Emits the `vbitrev.w vd, vj, vk` instruction. */
3698
+static void __attribute__((unused))
3699
+tcg_out_opc_vbitrev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3700
+{
3701
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_W, vd, vj, vk));
3702
+}
3703
+
3704
+/* Emits the `vbitrev.d vd, vj, vk` instruction. */
3705
+static void __attribute__((unused))
3706
+tcg_out_opc_vbitrev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3707
+{
3708
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_D, vd, vj, vk));
3709
+}
3710
+
3711
+/* Emits the `vpackev.b vd, vj, vk` instruction. */
3712
+static void __attribute__((unused))
3713
+tcg_out_opc_vpackev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3714
+{
3715
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_B, vd, vj, vk));
3716
+}
3717
+
3718
+/* Emits the `vpackev.h vd, vj, vk` instruction. */
3719
+static void __attribute__((unused))
3720
+tcg_out_opc_vpackev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3721
+{
3722
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_H, vd, vj, vk));
3723
+}
3724
+
3725
+/* Emits the `vpackev.w vd, vj, vk` instruction. */
3726
+static void __attribute__((unused))
3727
+tcg_out_opc_vpackev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3728
+{
3729
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_W, vd, vj, vk));
3730
+}
3731
+
3732
+/* Emits the `vpackev.d vd, vj, vk` instruction. */
3733
+static void __attribute__((unused))
3734
+tcg_out_opc_vpackev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3735
+{
3736
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_D, vd, vj, vk));
3737
+}
3738
+
3739
+/* Emits the `vpackod.b vd, vj, vk` instruction. */
3740
+static void __attribute__((unused))
3741
+tcg_out_opc_vpackod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3742
+{
3743
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_B, vd, vj, vk));
3744
+}
3745
+
3746
+/* Emits the `vpackod.h vd, vj, vk` instruction. */
3747
+static void __attribute__((unused))
3748
+tcg_out_opc_vpackod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3749
+{
3750
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_H, vd, vj, vk));
3751
+}
3752
+
3753
+/* Emits the `vpackod.w vd, vj, vk` instruction. */
3754
+static void __attribute__((unused))
3755
+tcg_out_opc_vpackod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3756
+{
3757
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_W, vd, vj, vk));
3758
+}
3759
+
3760
+/* Emits the `vpackod.d vd, vj, vk` instruction. */
3761
+static void __attribute__((unused))
3762
+tcg_out_opc_vpackod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3763
+{
3764
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_D, vd, vj, vk));
3765
+}
3766
+
3767
+/* Emits the `vilvl.b vd, vj, vk` instruction. */
3768
+static void __attribute__((unused))
3769
+tcg_out_opc_vilvl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3770
+{
3771
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_B, vd, vj, vk));
3772
+}
3773
+
3774
+/* Emits the `vilvl.h vd, vj, vk` instruction. */
3775
+static void __attribute__((unused))
3776
+tcg_out_opc_vilvl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3777
+{
3778
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_H, vd, vj, vk));
3779
+}
3780
+
3781
+/* Emits the `vilvl.w vd, vj, vk` instruction. */
3782
+static void __attribute__((unused))
3783
+tcg_out_opc_vilvl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3784
+{
3785
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_W, vd, vj, vk));
3786
+}
3787
+
3788
+/* Emits the `vilvl.d vd, vj, vk` instruction. */
3789
+static void __attribute__((unused))
3790
+tcg_out_opc_vilvl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3791
+{
3792
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_D, vd, vj, vk));
3793
+}
3794
+
3795
+/* Emits the `vilvh.b vd, vj, vk` instruction. */
3796
+static void __attribute__((unused))
3797
+tcg_out_opc_vilvh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3798
+{
3799
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_B, vd, vj, vk));
3800
+}
3801
+
3802
+/* Emits the `vilvh.h vd, vj, vk` instruction. */
3803
+static void __attribute__((unused))
3804
+tcg_out_opc_vilvh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3805
+{
3806
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_H, vd, vj, vk));
3807
+}
3808
+
3809
+/* Emits the `vilvh.w vd, vj, vk` instruction. */
3810
+static void __attribute__((unused))
3811
+tcg_out_opc_vilvh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3812
+{
3813
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_W, vd, vj, vk));
3814
+}
3815
+
3816
+/* Emits the `vilvh.d vd, vj, vk` instruction. */
3817
+static void __attribute__((unused))
3818
+tcg_out_opc_vilvh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3819
+{
3820
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_D, vd, vj, vk));
3821
+}
3822
+
3823
+/* Emits the `vpickev.b vd, vj, vk` instruction. */
3824
+static void __attribute__((unused))
3825
+tcg_out_opc_vpickev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3826
+{
3827
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_B, vd, vj, vk));
3828
+}
3829
+
3830
+/* Emits the `vpickev.h vd, vj, vk` instruction. */
3831
+static void __attribute__((unused))
3832
+tcg_out_opc_vpickev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3833
+{
3834
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_H, vd, vj, vk));
3835
+}
3836
+
3837
+/* Emits the `vpickev.w vd, vj, vk` instruction. */
3838
+static void __attribute__((unused))
3839
+tcg_out_opc_vpickev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3840
+{
3841
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_W, vd, vj, vk));
3842
+}
3843
+
3844
+/* Emits the `vpickev.d vd, vj, vk` instruction. */
3845
+static void __attribute__((unused))
3846
+tcg_out_opc_vpickev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3847
+{
3848
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_D, vd, vj, vk));
3849
+}
3850
+
3851
+/* Emits the `vpickod.b vd, vj, vk` instruction. */
3852
+static void __attribute__((unused))
3853
+tcg_out_opc_vpickod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3854
+{
3855
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_B, vd, vj, vk));
3856
+}
3857
+
3858
+/* Emits the `vpickod.h vd, vj, vk` instruction. */
3859
+static void __attribute__((unused))
3860
+tcg_out_opc_vpickod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3861
+{
3862
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_H, vd, vj, vk));
3863
+}
3864
+
3865
+/* Emits the `vpickod.w vd, vj, vk` instruction. */
3866
+static void __attribute__((unused))
3867
+tcg_out_opc_vpickod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3868
+{
3869
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_W, vd, vj, vk));
3870
+}
3871
+
3872
+/* Emits the `vpickod.d vd, vj, vk` instruction. */
3873
+static void __attribute__((unused))
3874
+tcg_out_opc_vpickod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3875
+{
3876
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_D, vd, vj, vk));
3877
+}
3878
+
3879
+/* Emits the `vreplve.b vd, vj, k` instruction. */
3880
+static void __attribute__((unused))
3881
+tcg_out_opc_vreplve_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3882
+{
3883
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_B, vd, vj, k));
3884
+}
3885
+
3886
+/* Emits the `vreplve.h vd, vj, k` instruction. */
3887
+static void __attribute__((unused))
3888
+tcg_out_opc_vreplve_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3889
+{
3890
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_H, vd, vj, k));
3891
+}
3892
+
3893
+/* Emits the `vreplve.w vd, vj, k` instruction. */
3894
+static void __attribute__((unused))
3895
+tcg_out_opc_vreplve_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3896
+{
3897
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_W, vd, vj, k));
3898
+}
3899
+
3900
+/* Emits the `vreplve.d vd, vj, k` instruction. */
3901
+static void __attribute__((unused))
3902
+tcg_out_opc_vreplve_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3903
+{
3904
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_D, vd, vj, k));
3905
+}
3906
+
3907
+/* Emits the `vand.v vd, vj, vk` instruction. */
3908
+static void __attribute__((unused))
3909
+tcg_out_opc_vand_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3910
+{
3911
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAND_V, vd, vj, vk));
3912
+}
3913
+
3914
+/* Emits the `vor.v vd, vj, vk` instruction. */
3915
+static void __attribute__((unused))
3916
+tcg_out_opc_vor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3917
+{
3918
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VOR_V, vd, vj, vk));
3919
+}
3920
+
3921
+/* Emits the `vxor.v vd, vj, vk` instruction. */
3922
+static void __attribute__((unused))
3923
+tcg_out_opc_vxor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3924
+{
3925
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VXOR_V, vd, vj, vk));
3926
+}
3927
+
3928
+/* Emits the `vnor.v vd, vj, vk` instruction. */
3929
+static void __attribute__((unused))
3930
+tcg_out_opc_vnor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3931
+{
3932
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VNOR_V, vd, vj, vk));
3933
+}
3934
+
3935
+/* Emits the `vandn.v vd, vj, vk` instruction. */
3936
+static void __attribute__((unused))
3937
+tcg_out_opc_vandn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3938
+{
3939
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VANDN_V, vd, vj, vk));
3940
+}
3941
+
3942
+/* Emits the `vorn.v vd, vj, vk` instruction. */
3943
+static void __attribute__((unused))
3944
+tcg_out_opc_vorn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3945
+{
3946
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VORN_V, vd, vj, vk));
3947
+}
3948
+
3949
+/* Emits the `vfrstp.b vd, vj, vk` instruction. */
3950
+static void __attribute__((unused))
3951
+tcg_out_opc_vfrstp_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3952
+{
3953
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_B, vd, vj, vk));
3954
+}
3955
+
3956
+/* Emits the `vfrstp.h vd, vj, vk` instruction. */
3957
+static void __attribute__((unused))
3958
+tcg_out_opc_vfrstp_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3959
+{
3960
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_H, vd, vj, vk));
3961
+}
3962
+
3963
+/* Emits the `vadd.q vd, vj, vk` instruction. */
3964
+static void __attribute__((unused))
3965
+tcg_out_opc_vadd_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3966
+{
3967
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_Q, vd, vj, vk));
3968
+}
3969
+
3970
+/* Emits the `vsub.q vd, vj, vk` instruction. */
3971
+static void __attribute__((unused))
3972
+tcg_out_opc_vsub_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3973
+{
3974
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_Q, vd, vj, vk));
3975
+}
3976
+
3977
+/* Emits the `vsigncov.b vd, vj, vk` instruction. */
3978
+static void __attribute__((unused))
3979
+tcg_out_opc_vsigncov_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3980
+{
3981
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_B, vd, vj, vk));
3982
+}
3983
+
3984
+/* Emits the `vsigncov.h vd, vj, vk` instruction. */
3985
+static void __attribute__((unused))
3986
+tcg_out_opc_vsigncov_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3987
+{
3988
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_H, vd, vj, vk));
3989
+}
3990
+
3991
+/* Emits the `vsigncov.w vd, vj, vk` instruction. */
3992
+static void __attribute__((unused))
3993
+tcg_out_opc_vsigncov_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3994
+{
3995
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_W, vd, vj, vk));
3996
+}
3997
+
3998
+/* Emits the `vsigncov.d vd, vj, vk` instruction. */
3999
+static void __attribute__((unused))
4000
+tcg_out_opc_vsigncov_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4001
+{
4002
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_D, vd, vj, vk));
4003
+}
4004
+
4005
+/* Emits the `vfadd.s vd, vj, vk` instruction. */
4006
+static void __attribute__((unused))
4007
+tcg_out_opc_vfadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4008
+{
4009
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_S, vd, vj, vk));
4010
+}
4011
+
4012
+/* Emits the `vfadd.d vd, vj, vk` instruction. */
4013
+static void __attribute__((unused))
4014
+tcg_out_opc_vfadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4015
+{
4016
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_D, vd, vj, vk));
4017
+}
4018
+
4019
+/* Emits the `vfsub.s vd, vj, vk` instruction. */
4020
+static void __attribute__((unused))
4021
+tcg_out_opc_vfsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4022
+{
4023
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_S, vd, vj, vk));
4024
+}
4025
+
4026
+/* Emits the `vfsub.d vd, vj, vk` instruction. */
4027
+static void __attribute__((unused))
4028
+tcg_out_opc_vfsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4029
+{
4030
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_D, vd, vj, vk));
4031
+}
4032
+
4033
+/* Emits the `vfmul.s vd, vj, vk` instruction. */
4034
+static void __attribute__((unused))
4035
+tcg_out_opc_vfmul_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4036
+{
4037
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_S, vd, vj, vk));
4038
+}
4039
+
4040
+/* Emits the `vfmul.d vd, vj, vk` instruction. */
4041
+static void __attribute__((unused))
4042
+tcg_out_opc_vfmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4043
+{
4044
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_D, vd, vj, vk));
4045
+}
4046
+
4047
+/* Emits the `vfdiv.s vd, vj, vk` instruction. */
4048
+static void __attribute__((unused))
4049
+tcg_out_opc_vfdiv_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4050
+{
4051
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_S, vd, vj, vk));
4052
+}
4053
+
4054
+/* Emits the `vfdiv.d vd, vj, vk` instruction. */
4055
+static void __attribute__((unused))
4056
+tcg_out_opc_vfdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4057
+{
4058
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_D, vd, vj, vk));
4059
+}
4060
+
4061
+/* Emits the `vfmax.s vd, vj, vk` instruction. */
4062
+static void __attribute__((unused))
4063
+tcg_out_opc_vfmax_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4064
+{
4065
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_S, vd, vj, vk));
4066
+}
4067
+
4068
+/* Emits the `vfmax.d vd, vj, vk` instruction. */
4069
+static void __attribute__((unused))
4070
+tcg_out_opc_vfmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4071
+{
4072
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_D, vd, vj, vk));
4073
+}
4074
+
4075
+/* Emits the `vfmin.s vd, vj, vk` instruction. */
4076
+static void __attribute__((unused))
4077
+tcg_out_opc_vfmin_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4078
+{
4079
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_S, vd, vj, vk));
4080
+}
4081
+
4082
+/* Emits the `vfmin.d vd, vj, vk` instruction. */
4083
+static void __attribute__((unused))
4084
+tcg_out_opc_vfmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4085
+{
4086
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_D, vd, vj, vk));
4087
+}
4088
+
4089
+/* Emits the `vfmaxa.s vd, vj, vk` instruction. */
4090
+static void __attribute__((unused))
4091
+tcg_out_opc_vfmaxa_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4092
+{
4093
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_S, vd, vj, vk));
4094
+}
4095
+
4096
+/* Emits the `vfmaxa.d vd, vj, vk` instruction. */
4097
+static void __attribute__((unused))
4098
+tcg_out_opc_vfmaxa_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4099
+{
4100
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_D, vd, vj, vk));
4101
+}
4102
+
4103
+/* Emits the `vfmina.s vd, vj, vk` instruction. */
4104
+static void __attribute__((unused))
4105
+tcg_out_opc_vfmina_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4106
+{
4107
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_S, vd, vj, vk));
4108
+}
4109
+
4110
+/* Emits the `vfmina.d vd, vj, vk` instruction. */
4111
+static void __attribute__((unused))
4112
+tcg_out_opc_vfmina_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4113
+{
4114
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_D, vd, vj, vk));
4115
+}
4116
+
4117
+/* Emits the `vfcvt.h.s vd, vj, vk` instruction. */
4118
+static void __attribute__((unused))
4119
+tcg_out_opc_vfcvt_h_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4120
+{
4121
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_H_S, vd, vj, vk));
4122
+}
4123
+
4124
+/* Emits the `vfcvt.s.d vd, vj, vk` instruction. */
4125
+static void __attribute__((unused))
4126
+tcg_out_opc_vfcvt_s_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4127
+{
4128
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_S_D, vd, vj, vk));
4129
+}
4130
+
4131
+/* Emits the `vffint.s.l vd, vj, vk` instruction. */
4132
+static void __attribute__((unused))
4133
+tcg_out_opc_vffint_s_l(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4134
+{
4135
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFFINT_S_L, vd, vj, vk));
4136
+}
4137
+
4138
+/* Emits the `vftint.w.d vd, vj, vk` instruction. */
4139
+static void __attribute__((unused))
4140
+tcg_out_opc_vftint_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4141
+{
4142
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINT_W_D, vd, vj, vk));
4143
+}
4144
+
4145
+/* Emits the `vftintrm.w.d vd, vj, vk` instruction. */
4146
+static void __attribute__((unused))
4147
+tcg_out_opc_vftintrm_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4148
+{
4149
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRM_W_D, vd, vj, vk));
4150
+}
4151
+
4152
+/* Emits the `vftintrp.w.d vd, vj, vk` instruction. */
4153
+static void __attribute__((unused))
4154
+tcg_out_opc_vftintrp_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4155
+{
4156
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRP_W_D, vd, vj, vk));
4157
+}
4158
+
4159
+/* Emits the `vftintrz.w.d vd, vj, vk` instruction. */
4160
+static void __attribute__((unused))
4161
+tcg_out_opc_vftintrz_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4162
+{
4163
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRZ_W_D, vd, vj, vk));
4164
+}
4165
+
4166
+/* Emits the `vftintrne.w.d vd, vj, vk` instruction. */
4167
+static void __attribute__((unused))
4168
+tcg_out_opc_vftintrne_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4169
+{
4170
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRNE_W_D, vd, vj, vk));
4171
+}
4172
+
4173
+/* Emits the `vshuf.h vd, vj, vk` instruction. */
4174
+static void __attribute__((unused))
4175
+tcg_out_opc_vshuf_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4176
+{
4177
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_H, vd, vj, vk));
4178
+}
4179
+
4180
+/* Emits the `vshuf.w vd, vj, vk` instruction. */
4181
+static void __attribute__((unused))
4182
+tcg_out_opc_vshuf_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4183
+{
4184
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_W, vd, vj, vk));
4185
+}
4186
+
4187
+/* Emits the `vshuf.d vd, vj, vk` instruction. */
4188
+static void __attribute__((unused))
4189
+tcg_out_opc_vshuf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4190
+{
4191
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_D, vd, vj, vk));
4192
+}
4193
+
4194
+/* Emits the `vseqi.b vd, vj, sk5` instruction. */
4195
+static void __attribute__((unused))
4196
+tcg_out_opc_vseqi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4197
+{
4198
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_B, vd, vj, sk5));
4199
+}
4200
+
4201
+/* Emits the `vseqi.h vd, vj, sk5` instruction. */
4202
+static void __attribute__((unused))
4203
+tcg_out_opc_vseqi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4204
+{
4205
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_H, vd, vj, sk5));
4206
+}
4207
+
4208
+/* Emits the `vseqi.w vd, vj, sk5` instruction. */
4209
+static void __attribute__((unused))
4210
+tcg_out_opc_vseqi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4211
+{
4212
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_W, vd, vj, sk5));
4213
+}
4214
+
4215
+/* Emits the `vseqi.d vd, vj, sk5` instruction. */
4216
+static void __attribute__((unused))
4217
+tcg_out_opc_vseqi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4218
+{
4219
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_D, vd, vj, sk5));
4220
+}
4221
+
4222
+/* Emits the `vslei.b vd, vj, sk5` instruction. */
4223
+static void __attribute__((unused))
4224
+tcg_out_opc_vslei_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4225
+{
4226
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_B, vd, vj, sk5));
4227
+}
4228
+
4229
+/* Emits the `vslei.h vd, vj, sk5` instruction. */
4230
+static void __attribute__((unused))
4231
+tcg_out_opc_vslei_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4232
+{
4233
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_H, vd, vj, sk5));
4234
+}
4235
+
4236
+/* Emits the `vslei.w vd, vj, sk5` instruction. */
4237
+static void __attribute__((unused))
4238
+tcg_out_opc_vslei_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4239
+{
4240
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_W, vd, vj, sk5));
4241
+}
4242
+
4243
+/* Emits the `vslei.d vd, vj, sk5` instruction. */
4244
+static void __attribute__((unused))
4245
+tcg_out_opc_vslei_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4246
+{
4247
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_D, vd, vj, sk5));
4248
+}
4249
+
4250
+/* Emits the `vslei.bu vd, vj, uk5` instruction. */
4251
+static void __attribute__((unused))
4252
+tcg_out_opc_vslei_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4253
+{
4254
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_BU, vd, vj, uk5));
4255
+}
4256
+
4257
+/* Emits the `vslei.hu vd, vj, uk5` instruction. */
4258
+static void __attribute__((unused))
4259
+tcg_out_opc_vslei_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4260
+{
4261
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_HU, vd, vj, uk5));
4262
+}
4263
+
4264
+/* Emits the `vslei.wu vd, vj, uk5` instruction. */
4265
+static void __attribute__((unused))
4266
+tcg_out_opc_vslei_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4267
+{
4268
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_WU, vd, vj, uk5));
4269
+}
4270
+
4271
+/* Emits the `vslei.du vd, vj, uk5` instruction. */
4272
+static void __attribute__((unused))
4273
+tcg_out_opc_vslei_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4274
+{
4275
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_DU, vd, vj, uk5));
4276
+}
4277
+
4278
+/* Emits the `vslti.b vd, vj, sk5` instruction. */
4279
+static void __attribute__((unused))
4280
+tcg_out_opc_vslti_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4281
+{
4282
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_B, vd, vj, sk5));
4283
+}
4284
+
4285
+/* Emits the `vslti.h vd, vj, sk5` instruction. */
4286
+static void __attribute__((unused))
4287
+tcg_out_opc_vslti_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4288
+{
4289
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_H, vd, vj, sk5));
4290
+}
4291
+
4292
+/* Emits the `vslti.w vd, vj, sk5` instruction. */
4293
+static void __attribute__((unused))
4294
+tcg_out_opc_vslti_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4295
+{
4296
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_W, vd, vj, sk5));
4297
+}
4298
+
4299
+/* Emits the `vslti.d vd, vj, sk5` instruction. */
4300
+static void __attribute__((unused))
4301
+tcg_out_opc_vslti_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4302
+{
4303
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_D, vd, vj, sk5));
4304
+}
4305
+
4306
+/* Emits the `vslti.bu vd, vj, uk5` instruction. */
4307
+static void __attribute__((unused))
4308
+tcg_out_opc_vslti_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4309
+{
4310
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_BU, vd, vj, uk5));
4311
+}
4312
+
4313
+/* Emits the `vslti.hu vd, vj, uk5` instruction. */
4314
+static void __attribute__((unused))
4315
+tcg_out_opc_vslti_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4316
+{
4317
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_HU, vd, vj, uk5));
4318
+}
4319
+
4320
+/* Emits the `vslti.wu vd, vj, uk5` instruction. */
4321
+static void __attribute__((unused))
4322
+tcg_out_opc_vslti_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4323
+{
4324
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_WU, vd, vj, uk5));
4325
+}
4326
+
4327
+/* Emits the `vslti.du vd, vj, uk5` instruction. */
4328
+static void __attribute__((unused))
4329
+tcg_out_opc_vslti_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4330
+{
4331
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_DU, vd, vj, uk5));
4332
+}
4333
+
4334
+/* Emits the `vaddi.bu vd, vj, uk5` instruction. */
4335
+static void __attribute__((unused))
4336
+tcg_out_opc_vaddi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4337
+{
4338
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_BU, vd, vj, uk5));
4339
+}
4340
+
4341
+/* Emits the `vaddi.hu vd, vj, uk5` instruction. */
4342
+static void __attribute__((unused))
4343
+tcg_out_opc_vaddi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4344
+{
4345
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_HU, vd, vj, uk5));
4346
+}
4347
+
4348
+/* Emits the `vaddi.wu vd, vj, uk5` instruction. */
4349
+static void __attribute__((unused))
4350
+tcg_out_opc_vaddi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4351
+{
4352
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_WU, vd, vj, uk5));
4353
+}
4354
+
4355
+/* Emits the `vaddi.du vd, vj, uk5` instruction. */
4356
+static void __attribute__((unused))
4357
+tcg_out_opc_vaddi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4358
+{
4359
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_DU, vd, vj, uk5));
4360
+}
4361
+
4362
+/* Emits the `vsubi.bu vd, vj, uk5` instruction. */
4363
+static void __attribute__((unused))
4364
+tcg_out_opc_vsubi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4365
+{
4366
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_BU, vd, vj, uk5));
4367
+}
4368
+
4369
+/* Emits the `vsubi.hu vd, vj, uk5` instruction. */
4370
+static void __attribute__((unused))
4371
+tcg_out_opc_vsubi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4372
+{
4373
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_HU, vd, vj, uk5));
4374
+}
4375
+
4376
+/* Emits the `vsubi.wu vd, vj, uk5` instruction. */
4377
+static void __attribute__((unused))
4378
+tcg_out_opc_vsubi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4379
+{
4380
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_WU, vd, vj, uk5));
4381
+}
4382
+
4383
+/* Emits the `vsubi.du vd, vj, uk5` instruction. */
4384
+static void __attribute__((unused))
4385
+tcg_out_opc_vsubi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4386
+{
4387
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_DU, vd, vj, uk5));
4388
+}
4389
+
4390
+/* Emits the `vbsll.v vd, vj, uk5` instruction. */
4391
+static void __attribute__((unused))
4392
+tcg_out_opc_vbsll_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4393
+{
4394
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSLL_V, vd, vj, uk5));
4395
+}
4396
+
4397
+/* Emits the `vbsrl.v vd, vj, uk5` instruction. */
4398
+static void __attribute__((unused))
4399
+tcg_out_opc_vbsrl_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4400
+{
4401
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSRL_V, vd, vj, uk5));
4402
+}
4403
+
4404
+/* Emits the `vmaxi.b vd, vj, sk5` instruction. */
4405
+static void __attribute__((unused))
4406
+tcg_out_opc_vmaxi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4407
+{
4408
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_B, vd, vj, sk5));
4409
+}
4410
+
4411
+/* Emits the `vmaxi.h vd, vj, sk5` instruction. */
4412
+static void __attribute__((unused))
4413
+tcg_out_opc_vmaxi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4414
+{
4415
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_H, vd, vj, sk5));
4416
+}
4417
+
4418
+/* Emits the `vmaxi.w vd, vj, sk5` instruction. */
4419
+static void __attribute__((unused))
4420
+tcg_out_opc_vmaxi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4421
+{
4422
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_W, vd, vj, sk5));
4423
+}
4424
+
4425
+/* Emits the `vmaxi.d vd, vj, sk5` instruction. */
4426
+static void __attribute__((unused))
4427
+tcg_out_opc_vmaxi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4428
+{
4429
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_D, vd, vj, sk5));
4430
+}
4431
+
4432
+/* Emits the `vmini.b vd, vj, sk5` instruction. */
4433
+static void __attribute__((unused))
4434
+tcg_out_opc_vmini_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4435
+{
4436
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_B, vd, vj, sk5));
4437
+}
4438
+
4439
+/* Emits the `vmini.h vd, vj, sk5` instruction. */
4440
+static void __attribute__((unused))
4441
+tcg_out_opc_vmini_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4442
+{
4443
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_H, vd, vj, sk5));
4444
+}
4445
+
4446
+/* Emits the `vmini.w vd, vj, sk5` instruction. */
4447
+static void __attribute__((unused))
4448
+tcg_out_opc_vmini_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4449
+{
4450
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_W, vd, vj, sk5));
4451
+}
4452
+
4453
+/* Emits the `vmini.d vd, vj, sk5` instruction. */
4454
+static void __attribute__((unused))
4455
+tcg_out_opc_vmini_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4456
+{
4457
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_D, vd, vj, sk5));
4458
+}
4459
+
4460
+/* Emits the `vmaxi.bu vd, vj, uk5` instruction. */
4461
+static void __attribute__((unused))
4462
+tcg_out_opc_vmaxi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4463
+{
4464
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_BU, vd, vj, uk5));
4465
+}
4466
+
4467
+/* Emits the `vmaxi.hu vd, vj, uk5` instruction. */
4468
+static void __attribute__((unused))
4469
+tcg_out_opc_vmaxi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4470
+{
4471
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_HU, vd, vj, uk5));
4472
+}
4473
+
4474
+/* Emits the `vmaxi.wu vd, vj, uk5` instruction. */
4475
+static void __attribute__((unused))
4476
+tcg_out_opc_vmaxi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4477
+{
4478
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_WU, vd, vj, uk5));
4479
+}
4480
+
4481
+/* Emits the `vmaxi.du vd, vj, uk5` instruction. */
4482
+static void __attribute__((unused))
4483
+tcg_out_opc_vmaxi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4484
+{
4485
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_DU, vd, vj, uk5));
4486
+}
4487
+
4488
+/* Emits the `vmini.bu vd, vj, uk5` instruction. */
4489
+static void __attribute__((unused))
4490
+tcg_out_opc_vmini_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4491
+{
4492
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_BU, vd, vj, uk5));
4493
+}
4494
+
4495
+/* Emits the `vmini.hu vd, vj, uk5` instruction. */
4496
+static void __attribute__((unused))
4497
+tcg_out_opc_vmini_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4498
+{
4499
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_HU, vd, vj, uk5));
4500
+}
4501
+
4502
+/* Emits the `vmini.wu vd, vj, uk5` instruction. */
4503
+static void __attribute__((unused))
4504
+tcg_out_opc_vmini_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4505
+{
4506
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_WU, vd, vj, uk5));
4507
+}
4508
+
4509
+/* Emits the `vmini.du vd, vj, uk5` instruction. */
4510
+static void __attribute__((unused))
4511
+tcg_out_opc_vmini_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4512
+{
4513
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_DU, vd, vj, uk5));
4514
+}
4515
+
4516
+/* Emits the `vfrstpi.b vd, vj, uk5` instruction. */
4517
+static void __attribute__((unused))
4518
+tcg_out_opc_vfrstpi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4519
+{
4520
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_B, vd, vj, uk5));
4521
+}
4522
+
4523
+/* Emits the `vfrstpi.h vd, vj, uk5` instruction. */
4524
+static void __attribute__((unused))
4525
+tcg_out_opc_vfrstpi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4526
+{
4527
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_H, vd, vj, uk5));
4528
+}
4529
+
4530
+/* Emits the `vclo.b vd, vj` instruction. */
4531
+static void __attribute__((unused))
4532
+tcg_out_opc_vclo_b(TCGContext *s, TCGReg vd, TCGReg vj)
4533
+{
4534
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_B, vd, vj));
4535
+}
4536
+
4537
+/* Emits the `vclo.h vd, vj` instruction. */
4538
+static void __attribute__((unused))
4539
+tcg_out_opc_vclo_h(TCGContext *s, TCGReg vd, TCGReg vj)
4540
+{
4541
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_H, vd, vj));
4542
+}
4543
+
4544
+/* Emits the `vclo.w vd, vj` instruction. */
4545
+static void __attribute__((unused))
4546
+tcg_out_opc_vclo_w(TCGContext *s, TCGReg vd, TCGReg vj)
4547
+{
4548
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_W, vd, vj));
4549
+}
4550
+
4551
+/* Emits the `vclo.d vd, vj` instruction. */
4552
+static void __attribute__((unused))
4553
+tcg_out_opc_vclo_d(TCGContext *s, TCGReg vd, TCGReg vj)
4554
+{
4555
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_D, vd, vj));
4556
+}
4557
+
4558
+/* Emits the `vclz.b vd, vj` instruction. */
4559
+static void __attribute__((unused))
4560
+tcg_out_opc_vclz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4561
+{
4562
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_B, vd, vj));
4563
+}
4564
+
4565
+/* Emits the `vclz.h vd, vj` instruction. */
4566
+static void __attribute__((unused))
4567
+tcg_out_opc_vclz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4568
+{
4569
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_H, vd, vj));
4570
+}
4571
+
4572
+/* Emits the `vclz.w vd, vj` instruction. */
4573
+static void __attribute__((unused))
4574
+tcg_out_opc_vclz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4575
+{
4576
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_W, vd, vj));
4577
+}
4578
+
4579
+/* Emits the `vclz.d vd, vj` instruction. */
4580
+static void __attribute__((unused))
4581
+tcg_out_opc_vclz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4582
+{
4583
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_D, vd, vj));
4584
+}
4585
+
4586
+/* Emits the `vpcnt.b vd, vj` instruction. */
4587
+static void __attribute__((unused))
4588
+tcg_out_opc_vpcnt_b(TCGContext *s, TCGReg vd, TCGReg vj)
4589
+{
4590
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_B, vd, vj));
4591
+}
4592
+
4593
+/* Emits the `vpcnt.h vd, vj` instruction. */
4594
+static void __attribute__((unused))
4595
+tcg_out_opc_vpcnt_h(TCGContext *s, TCGReg vd, TCGReg vj)
4596
+{
4597
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_H, vd, vj));
4598
+}
4599
+
4600
+/* Emits the `vpcnt.w vd, vj` instruction. */
4601
+static void __attribute__((unused))
4602
+tcg_out_opc_vpcnt_w(TCGContext *s, TCGReg vd, TCGReg vj)
4603
+{
4604
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_W, vd, vj));
4605
+}
4606
+
4607
+/* Emits the `vpcnt.d vd, vj` instruction. */
4608
+static void __attribute__((unused))
4609
+tcg_out_opc_vpcnt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4610
+{
4611
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_D, vd, vj));
4612
+}
4613
+
4614
+/* Emits the `vneg.b vd, vj` instruction. */
4615
+static void __attribute__((unused))
4616
+tcg_out_opc_vneg_b(TCGContext *s, TCGReg vd, TCGReg vj)
4617
+{
4618
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_B, vd, vj));
4619
+}
4620
+
4621
+/* Emits the `vneg.h vd, vj` instruction. */
4622
+static void __attribute__((unused))
4623
+tcg_out_opc_vneg_h(TCGContext *s, TCGReg vd, TCGReg vj)
4624
+{
4625
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_H, vd, vj));
4626
+}
4627
+
4628
+/* Emits the `vneg.w vd, vj` instruction. */
4629
+static void __attribute__((unused))
4630
+tcg_out_opc_vneg_w(TCGContext *s, TCGReg vd, TCGReg vj)
4631
+{
4632
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_W, vd, vj));
4633
+}
4634
+
4635
+/* Emits the `vneg.d vd, vj` instruction. */
4636
+static void __attribute__((unused))
4637
+tcg_out_opc_vneg_d(TCGContext *s, TCGReg vd, TCGReg vj)
4638
+{
4639
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_D, vd, vj));
4640
+}
4641
+
4642
+/* Emits the `vmskltz.b vd, vj` instruction. */
4643
+static void __attribute__((unused))
4644
+tcg_out_opc_vmskltz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4645
+{
4646
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_B, vd, vj));
4647
+}
4648
+
4649
+/* Emits the `vmskltz.h vd, vj` instruction. */
4650
+static void __attribute__((unused))
4651
+tcg_out_opc_vmskltz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4652
+{
4653
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_H, vd, vj));
4654
+}
4655
+
4656
+/* Emits the `vmskltz.w vd, vj` instruction. */
4657
+static void __attribute__((unused))
4658
+tcg_out_opc_vmskltz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4659
+{
4660
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_W, vd, vj));
4661
+}
4662
+
4663
+/* Emits the `vmskltz.d vd, vj` instruction. */
4664
+static void __attribute__((unused))
4665
+tcg_out_opc_vmskltz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4666
+{
4667
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_D, vd, vj));
4668
+}
4669
+
4670
+/* Emits the `vmskgez.b vd, vj` instruction. */
4671
+static void __attribute__((unused))
4672
+tcg_out_opc_vmskgez_b(TCGContext *s, TCGReg vd, TCGReg vj)
4673
+{
4674
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKGEZ_B, vd, vj));
4675
+}
4676
+
4677
+/* Emits the `vmsknz.b vd, vj` instruction. */
4678
+static void __attribute__((unused))
4679
+tcg_out_opc_vmsknz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4680
+{
4681
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKNZ_B, vd, vj));
4682
+}
4683
+
4684
+/* Emits the `vseteqz.v cd, vj` instruction. */
4685
+static void __attribute__((unused))
4686
+tcg_out_opc_vseteqz_v(TCGContext *s, TCGReg cd, TCGReg vj)
4687
+{
4688
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETEQZ_V, cd, vj));
4689
+}
4690
+
4691
+/* Emits the `vsetnez.v cd, vj` instruction. */
4692
+static void __attribute__((unused))
4693
+tcg_out_opc_vsetnez_v(TCGContext *s, TCGReg cd, TCGReg vj)
4694
+{
4695
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETNEZ_V, cd, vj));
4696
+}
4697
+
4698
+/* Emits the `vsetanyeqz.b cd, vj` instruction. */
4699
+static void __attribute__((unused))
4700
+tcg_out_opc_vsetanyeqz_b(TCGContext *s, TCGReg cd, TCGReg vj)
4701
+{
4702
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_B, cd, vj));
4703
+}
4704
+
4705
+/* Emits the `vsetanyeqz.h cd, vj` instruction. */
4706
+static void __attribute__((unused))
4707
+tcg_out_opc_vsetanyeqz_h(TCGContext *s, TCGReg cd, TCGReg vj)
4708
+{
4709
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_H, cd, vj));
4710
+}
4711
+
4712
+/* Emits the `vsetanyeqz.w cd, vj` instruction. */
4713
+static void __attribute__((unused))
4714
+tcg_out_opc_vsetanyeqz_w(TCGContext *s, TCGReg cd, TCGReg vj)
4715
+{
4716
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_W, cd, vj));
4717
+}
4718
+
4719
+/* Emits the `vsetanyeqz.d cd, vj` instruction. */
4720
+static void __attribute__((unused))
4721
+tcg_out_opc_vsetanyeqz_d(TCGContext *s, TCGReg cd, TCGReg vj)
4722
+{
4723
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_D, cd, vj));
4724
+}
4725
+
4726
+/* Emits the `vsetallnez.b cd, vj` instruction. */
4727
+static void __attribute__((unused))
4728
+tcg_out_opc_vsetallnez_b(TCGContext *s, TCGReg cd, TCGReg vj)
4729
+{
4730
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_B, cd, vj));
4731
+}
4732
+
4733
+/* Emits the `vsetallnez.h cd, vj` instruction. */
4734
+static void __attribute__((unused))
4735
+tcg_out_opc_vsetallnez_h(TCGContext *s, TCGReg cd, TCGReg vj)
4736
+{
4737
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_H, cd, vj));
4738
+}
4739
+
4740
+/* Emits the `vsetallnez.w cd, vj` instruction. */
4741
+static void __attribute__((unused))
4742
+tcg_out_opc_vsetallnez_w(TCGContext *s, TCGReg cd, TCGReg vj)
4743
+{
4744
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_W, cd, vj));
4745
+}
4746
+
4747
+/* Emits the `vsetallnez.d cd, vj` instruction. */
4748
+static void __attribute__((unused))
4749
+tcg_out_opc_vsetallnez_d(TCGContext *s, TCGReg cd, TCGReg vj)
4750
+{
4751
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_D, cd, vj));
4752
+}
4753
+
4754
+/* Emits the `vflogb.s vd, vj` instruction. */
4755
+static void __attribute__((unused))
4756
+tcg_out_opc_vflogb_s(TCGContext *s, TCGReg vd, TCGReg vj)
4757
+{
4758
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_S, vd, vj));
4759
+}
4760
+
4761
+/* Emits the `vflogb.d vd, vj` instruction. */
4762
+static void __attribute__((unused))
4763
+tcg_out_opc_vflogb_d(TCGContext *s, TCGReg vd, TCGReg vj)
4764
+{
4765
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_D, vd, vj));
4766
+}
4767
+
4768
+/* Emits the `vfclass.s vd, vj` instruction. */
4769
+static void __attribute__((unused))
4770
+tcg_out_opc_vfclass_s(TCGContext *s, TCGReg vd, TCGReg vj)
4771
+{
4772
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_S, vd, vj));
4773
+}
4774
+
4775
+/* Emits the `vfclass.d vd, vj` instruction. */
4776
+static void __attribute__((unused))
4777
+tcg_out_opc_vfclass_d(TCGContext *s, TCGReg vd, TCGReg vj)
4778
+{
4779
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_D, vd, vj));
4780
+}
4781
+
4782
+/* Emits the `vfsqrt.s vd, vj` instruction. */
4783
+static void __attribute__((unused))
4784
+tcg_out_opc_vfsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4785
+{
4786
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_S, vd, vj));
4787
+}
4788
+
4789
+/* Emits the `vfsqrt.d vd, vj` instruction. */
4790
+static void __attribute__((unused))
4791
+tcg_out_opc_vfsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4792
+{
4793
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_D, vd, vj));
4794
+}
4795
+
4796
+/* Emits the `vfrecip.s vd, vj` instruction. */
4797
+static void __attribute__((unused))
4798
+tcg_out_opc_vfrecip_s(TCGContext *s, TCGReg vd, TCGReg vj)
4799
+{
4800
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_S, vd, vj));
4801
+}
4802
+
4803
+/* Emits the `vfrecip.d vd, vj` instruction. */
4804
+static void __attribute__((unused))
4805
+tcg_out_opc_vfrecip_d(TCGContext *s, TCGReg vd, TCGReg vj)
4806
+{
4807
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_D, vd, vj));
4808
+}
4809
+
4810
+/* Emits the `vfrsqrt.s vd, vj` instruction. */
4811
+static void __attribute__((unused))
4812
+tcg_out_opc_vfrsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4813
+{
4814
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_S, vd, vj));
4815
+}
4816
+
4817
+/* Emits the `vfrsqrt.d vd, vj` instruction. */
4818
+static void __attribute__((unused))
4819
+tcg_out_opc_vfrsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4820
+{
4821
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_D, vd, vj));
4822
+}
4823
+
4824
+/* Emits the `vfrint.s vd, vj` instruction. */
4825
+static void __attribute__((unused))
4826
+tcg_out_opc_vfrint_s(TCGContext *s, TCGReg vd, TCGReg vj)
4827
+{
4828
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_S, vd, vj));
4829
+}
4830
+
4831
+/* Emits the `vfrint.d vd, vj` instruction. */
4832
+static void __attribute__((unused))
4833
+tcg_out_opc_vfrint_d(TCGContext *s, TCGReg vd, TCGReg vj)
4834
+{
4835
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_D, vd, vj));
4836
+}
4837
+
4838
+/* Emits the `vfrintrm.s vd, vj` instruction. */
4839
+static void __attribute__((unused))
4840
+tcg_out_opc_vfrintrm_s(TCGContext *s, TCGReg vd, TCGReg vj)
4841
+{
4842
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_S, vd, vj));
4843
+}
4844
+
4845
+/* Emits the `vfrintrm.d vd, vj` instruction. */
4846
+static void __attribute__((unused))
4847
+tcg_out_opc_vfrintrm_d(TCGContext *s, TCGReg vd, TCGReg vj)
4848
+{
4849
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_D, vd, vj));
4850
+}
4851
+
4852
+/* Emits the `vfrintrp.s vd, vj` instruction. */
4853
+static void __attribute__((unused))
4854
+tcg_out_opc_vfrintrp_s(TCGContext *s, TCGReg vd, TCGReg vj)
4855
+{
4856
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_S, vd, vj));
4857
+}
4858
+
4859
+/* Emits the `vfrintrp.d vd, vj` instruction. */
4860
+static void __attribute__((unused))
4861
+tcg_out_opc_vfrintrp_d(TCGContext *s, TCGReg vd, TCGReg vj)
4862
+{
4863
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_D, vd, vj));
4864
+}
4865
+
4866
+/* Emits the `vfrintrz.s vd, vj` instruction. */
4867
+static void __attribute__((unused))
4868
+tcg_out_opc_vfrintrz_s(TCGContext *s, TCGReg vd, TCGReg vj)
4869
+{
4870
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_S, vd, vj));
4871
+}
4872
+
4873
+/* Emits the `vfrintrz.d vd, vj` instruction. */
4874
+static void __attribute__((unused))
4875
+tcg_out_opc_vfrintrz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4876
+{
4877
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_D, vd, vj));
4878
+}
4879
+
4880
+/* Emits the `vfrintrne.s vd, vj` instruction. */
4881
+static void __attribute__((unused))
4882
+tcg_out_opc_vfrintrne_s(TCGContext *s, TCGReg vd, TCGReg vj)
4883
+{
4884
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_S, vd, vj));
4885
+}
4886
+
4887
+/* Emits the `vfrintrne.d vd, vj` instruction. */
4888
+static void __attribute__((unused))
4889
+tcg_out_opc_vfrintrne_d(TCGContext *s, TCGReg vd, TCGReg vj)
4890
+{
4891
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_D, vd, vj));
4892
+}
4893
+
4894
+/* Emits the `vfcvtl.s.h vd, vj` instruction. */
4895
+static void __attribute__((unused))
4896
+tcg_out_opc_vfcvtl_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4897
+{
4898
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_S_H, vd, vj));
4899
+}
4900
+
4901
+/* Emits the `vfcvth.s.h vd, vj` instruction. */
4902
+static void __attribute__((unused))
4903
+tcg_out_opc_vfcvth_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4904
+{
4905
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_S_H, vd, vj));
4906
+}
4907
+
4908
+/* Emits the `vfcvtl.d.s vd, vj` instruction. */
4909
+static void __attribute__((unused))
4910
+tcg_out_opc_vfcvtl_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4911
+{
4912
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_D_S, vd, vj));
4913
+}
4914
+
4915
+/* Emits the `vfcvth.d.s vd, vj` instruction. */
4916
+static void __attribute__((unused))
4917
+tcg_out_opc_vfcvth_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4918
+{
4919
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_D_S, vd, vj));
4920
+}
4921
+
4922
+/* Emits the `vffint.s.w vd, vj` instruction. */
4923
+static void __attribute__((unused))
4924
+tcg_out_opc_vffint_s_w(TCGContext *s, TCGReg vd, TCGReg vj)
4925
+{
4926
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_W, vd, vj));
4927
+}
4928
+
4929
+/* Emits the `vffint.s.wu vd, vj` instruction. */
4930
+static void __attribute__((unused))
4931
+tcg_out_opc_vffint_s_wu(TCGContext *s, TCGReg vd, TCGReg vj)
4932
+{
4933
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_WU, vd, vj));
4934
+}
4935
+
4936
+/* Emits the `vffint.d.l vd, vj` instruction. */
4937
+static void __attribute__((unused))
4938
+tcg_out_opc_vffint_d_l(TCGContext *s, TCGReg vd, TCGReg vj)
4939
+{
4940
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_L, vd, vj));
4941
+}
4942
+
4943
+/* Emits the `vffint.d.lu vd, vj` instruction. */
4944
+static void __attribute__((unused))
4945
+tcg_out_opc_vffint_d_lu(TCGContext *s, TCGReg vd, TCGReg vj)
4946
+{
4947
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_LU, vd, vj));
4948
+}
4949
+
4950
+/* Emits the `vffintl.d.w vd, vj` instruction. */
4951
+static void __attribute__((unused))
4952
+tcg_out_opc_vffintl_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4953
+{
4954
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTL_D_W, vd, vj));
4955
+}
4956
+
4957
+/* Emits the `vffinth.d.w vd, vj` instruction. */
4958
+static void __attribute__((unused))
4959
+tcg_out_opc_vffinth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4960
+{
4961
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTH_D_W, vd, vj));
4962
+}
4963
+
4964
+/* Emits the `vftint.w.s vd, vj` instruction. */
4965
+static void __attribute__((unused))
4966
+tcg_out_opc_vftint_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4967
+{
4968
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_W_S, vd, vj));
4969
+}
4970
+
4971
+/* Emits the `vftint.l.d vd, vj` instruction. */
4972
+static void __attribute__((unused))
4973
+tcg_out_opc_vftint_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4974
+{
4975
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_L_D, vd, vj));
4976
+}
4977
+
4978
+/* Emits the `vftintrm.w.s vd, vj` instruction. */
4979
+static void __attribute__((unused))
4980
+tcg_out_opc_vftintrm_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4981
+{
4982
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_W_S, vd, vj));
4983
+}
4984
+
4985
+/* Emits the `vftintrm.l.d vd, vj` instruction. */
4986
+static void __attribute__((unused))
4987
+tcg_out_opc_vftintrm_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4988
+{
4989
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_L_D, vd, vj));
4990
+}
4991
+
4992
+/* Emits the `vftintrp.w.s vd, vj` instruction. */
4993
+static void __attribute__((unused))
4994
+tcg_out_opc_vftintrp_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4995
+{
4996
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_W_S, vd, vj));
4997
+}
4998
+
4999
+/* Emits the `vftintrp.l.d vd, vj` instruction. */
5000
+static void __attribute__((unused))
5001
+tcg_out_opc_vftintrp_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5002
+{
5003
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_L_D, vd, vj));
5004
+}
5005
+
5006
+/* Emits the `vftintrz.w.s vd, vj` instruction. */
5007
+static void __attribute__((unused))
5008
+tcg_out_opc_vftintrz_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
5009
+{
5010
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_W_S, vd, vj));
5011
+}
5012
+
5013
+/* Emits the `vftintrz.l.d vd, vj` instruction. */
5014
+static void __attribute__((unused))
5015
+tcg_out_opc_vftintrz_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5016
+{
5017
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_L_D, vd, vj));
5018
+}
5019
+
5020
+/* Emits the `vftintrne.w.s vd, vj` instruction. */
5021
+static void __attribute__((unused))
5022
+tcg_out_opc_vftintrne_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
5023
+{
5024
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_W_S, vd, vj));
5025
+}
5026
+
5027
+/* Emits the `vftintrne.l.d vd, vj` instruction. */
5028
+static void __attribute__((unused))
5029
+tcg_out_opc_vftintrne_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5030
+{
5031
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_L_D, vd, vj));
5032
+}
5033
+
5034
+/* Emits the `vftint.wu.s vd, vj` instruction. */
5035
+static void __attribute__((unused))
5036
+tcg_out_opc_vftint_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
5037
+{
5038
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_WU_S, vd, vj));
5039
+}
5040
+
5041
+/* Emits the `vftint.lu.d vd, vj` instruction. */
5042
+static void __attribute__((unused))
5043
+tcg_out_opc_vftint_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
5044
+{
5045
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_LU_D, vd, vj));
5046
+}
5047
+
5048
+/* Emits the `vftintrz.wu.s vd, vj` instruction. */
5049
+static void __attribute__((unused))
5050
+tcg_out_opc_vftintrz_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
5051
+{
5052
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_WU_S, vd, vj));
5053
+}
5054
+
5055
+/* Emits the `vftintrz.lu.d vd, vj` instruction. */
5056
+static void __attribute__((unused))
5057
+tcg_out_opc_vftintrz_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
5058
+{
5059
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_LU_D, vd, vj));
5060
+}
5061
+
5062
+/* Emits the `vftintl.l.s vd, vj` instruction. */
5063
+static void __attribute__((unused))
5064
+tcg_out_opc_vftintl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5065
+{
5066
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTL_L_S, vd, vj));
5067
+}
5068
+
5069
+/* Emits the `vftinth.l.s vd, vj` instruction. */
5070
+static void __attribute__((unused))
5071
+tcg_out_opc_vftinth_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5072
+{
5073
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTH_L_S, vd, vj));
5074
+}
5075
+
5076
+/* Emits the `vftintrml.l.s vd, vj` instruction. */
5077
+static void __attribute__((unused))
5078
+tcg_out_opc_vftintrml_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5079
+{
5080
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRML_L_S, vd, vj));
5081
+}
5082
+
5083
+/* Emits the `vftintrmh.l.s vd, vj` instruction. */
5084
+static void __attribute__((unused))
5085
+tcg_out_opc_vftintrmh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5086
+{
5087
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRMH_L_S, vd, vj));
5088
+}
5089
+
5090
+/* Emits the `vftintrpl.l.s vd, vj` instruction. */
5091
+static void __attribute__((unused))
5092
+tcg_out_opc_vftintrpl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5093
+{
5094
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPL_L_S, vd, vj));
5095
+}
5096
+
5097
+/* Emits the `vftintrph.l.s vd, vj` instruction. */
5098
+static void __attribute__((unused))
5099
+tcg_out_opc_vftintrph_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5100
+{
5101
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPH_L_S, vd, vj));
5102
+}
5103
+
5104
+/* Emits the `vftintrzl.l.s vd, vj` instruction. */
5105
+static void __attribute__((unused))
5106
+tcg_out_opc_vftintrzl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5107
+{
5108
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZL_L_S, vd, vj));
5109
+}
5110
+
5111
+/* Emits the `vftintrzh.l.s vd, vj` instruction. */
5112
+static void __attribute__((unused))
5113
+tcg_out_opc_vftintrzh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5114
+{
5115
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZH_L_S, vd, vj));
5116
+}
5117
+
5118
+/* Emits the `vftintrnel.l.s vd, vj` instruction. */
5119
+static void __attribute__((unused))
5120
+tcg_out_opc_vftintrnel_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5121
+{
5122
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEL_L_S, vd, vj));
5123
+}
5124
+
5125
+/* Emits the `vftintrneh.l.s vd, vj` instruction. */
5126
+static void __attribute__((unused))
5127
+tcg_out_opc_vftintrneh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5128
+{
5129
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEH_L_S, vd, vj));
5130
+}
5131
+
5132
+/* Emits the `vexth.h.b vd, vj` instruction. */
5133
+static void __attribute__((unused))
5134
+tcg_out_opc_vexth_h_b(TCGContext *s, TCGReg vd, TCGReg vj)
5135
+{
5136
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_H_B, vd, vj));
5137
+}
5138
+
5139
+/* Emits the `vexth.w.h vd, vj` instruction. */
5140
+static void __attribute__((unused))
5141
+tcg_out_opc_vexth_w_h(TCGContext *s, TCGReg vd, TCGReg vj)
5142
+{
5143
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_W_H, vd, vj));
5144
+}
5145
+
5146
+/* Emits the `vexth.d.w vd, vj` instruction. */
5147
+static void __attribute__((unused))
5148
+tcg_out_opc_vexth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
5149
+{
5150
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_D_W, vd, vj));
5151
+}
5152
+
5153
+/* Emits the `vexth.q.d vd, vj` instruction. */
5154
+static void __attribute__((unused))
5155
+tcg_out_opc_vexth_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
5156
+{
5157
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_Q_D, vd, vj));
5158
+}
5159
+
5160
+/* Emits the `vexth.hu.bu vd, vj` instruction. */
5161
+static void __attribute__((unused))
5162
+tcg_out_opc_vexth_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj)
5163
+{
5164
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_HU_BU, vd, vj));
5165
+}
5166
+
5167
+/* Emits the `vexth.wu.hu vd, vj` instruction. */
5168
+static void __attribute__((unused))
5169
+tcg_out_opc_vexth_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj)
5170
+{
5171
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_WU_HU, vd, vj));
5172
+}
5173
+
5174
+/* Emits the `vexth.du.wu vd, vj` instruction. */
5175
+static void __attribute__((unused))
5176
+tcg_out_opc_vexth_du_wu(TCGContext *s, TCGReg vd, TCGReg vj)
5177
+{
5178
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_DU_WU, vd, vj));
5179
+}
5180
+
5181
+/* Emits the `vexth.qu.du vd, vj` instruction. */
5182
+static void __attribute__((unused))
5183
+tcg_out_opc_vexth_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
5184
+{
5185
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_QU_DU, vd, vj));
5186
+}
5187
+
5188
+/* Emits the `vreplgr2vr.b vd, j` instruction. */
5189
+static void __attribute__((unused))
5190
+tcg_out_opc_vreplgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j)
5191
+{
5192
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_B, vd, j));
5193
+}
5194
+
5195
+/* Emits the `vreplgr2vr.h vd, j` instruction. */
5196
+static void __attribute__((unused))
5197
+tcg_out_opc_vreplgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j)
5198
+{
5199
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_H, vd, j));
5200
+}
5201
+
5202
+/* Emits the `vreplgr2vr.w vd, j` instruction. */
5203
+static void __attribute__((unused))
5204
+tcg_out_opc_vreplgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j)
5205
+{
5206
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_W, vd, j));
5207
+}
5208
+
5209
+/* Emits the `vreplgr2vr.d vd, j` instruction. */
5210
+static void __attribute__((unused))
5211
+tcg_out_opc_vreplgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j)
5212
+{
5213
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_D, vd, j));
5214
+}
5215
+
5216
+/* Emits the `vrotri.b vd, vj, uk3` instruction. */
5217
+static void __attribute__((unused))
5218
+tcg_out_opc_vrotri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5219
+{
5220
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VROTRI_B, vd, vj, uk3));
5221
+}
5222
+
5223
+/* Emits the `vrotri.h vd, vj, uk4` instruction. */
5224
+static void __attribute__((unused))
5225
+tcg_out_opc_vrotri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5226
+{
5227
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VROTRI_H, vd, vj, uk4));
5228
+}
5229
+
5230
+/* Emits the `vrotri.w vd, vj, uk5` instruction. */
5231
+static void __attribute__((unused))
5232
+tcg_out_opc_vrotri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5233
+{
5234
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VROTRI_W, vd, vj, uk5));
5235
+}
5236
+
5237
+/* Emits the `vrotri.d vd, vj, uk6` instruction. */
5238
+static void __attribute__((unused))
5239
+tcg_out_opc_vrotri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5240
+{
5241
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VROTRI_D, vd, vj, uk6));
5242
+}
5243
+
5244
+/* Emits the `vsrlri.b vd, vj, uk3` instruction. */
5245
+static void __attribute__((unused))
5246
+tcg_out_opc_vsrlri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5247
+{
5248
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLRI_B, vd, vj, uk3));
5249
+}
5250
+
5251
+/* Emits the `vsrlri.h vd, vj, uk4` instruction. */
5252
+static void __attribute__((unused))
5253
+tcg_out_opc_vsrlri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5254
+{
5255
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRI_H, vd, vj, uk4));
5256
+}
5257
+
5258
+/* Emits the `vsrlri.w vd, vj, uk5` instruction. */
5259
+static void __attribute__((unused))
5260
+tcg_out_opc_vsrlri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5261
+{
5262
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRI_W, vd, vj, uk5));
5263
+}
5264
+
5265
+/* Emits the `vsrlri.d vd, vj, uk6` instruction. */
5266
+static void __attribute__((unused))
5267
+tcg_out_opc_vsrlri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5268
+{
5269
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRI_D, vd, vj, uk6));
5270
+}
5271
+
5272
+/* Emits the `vsrari.b vd, vj, uk3` instruction. */
5273
+static void __attribute__((unused))
5274
+tcg_out_opc_vsrari_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5275
+{
5276
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRARI_B, vd, vj, uk3));
5277
+}
5278
+
5279
+/* Emits the `vsrari.h vd, vj, uk4` instruction. */
5280
+static void __attribute__((unused))
5281
+tcg_out_opc_vsrari_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5282
+{
5283
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARI_H, vd, vj, uk4));
5284
+}
5285
+
5286
+/* Emits the `vsrari.w vd, vj, uk5` instruction. */
5287
+static void __attribute__((unused))
5288
+tcg_out_opc_vsrari_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5289
+{
5290
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARI_W, vd, vj, uk5));
5291
+}
5292
+
5293
+/* Emits the `vsrari.d vd, vj, uk6` instruction. */
5294
+static void __attribute__((unused))
5295
+tcg_out_opc_vsrari_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5296
+{
5297
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARI_D, vd, vj, uk6));
5298
+}
5299
+
5300
+/* Emits the `vinsgr2vr.b vd, j, uk4` instruction. */
5301
+static void __attribute__((unused))
5302
+tcg_out_opc_vinsgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk4)
5303
+{
5304
+ tcg_out32(s, encode_vdjuk4_insn(OPC_VINSGR2VR_B, vd, j, uk4));
5305
+}
5306
+
5307
+/* Emits the `vinsgr2vr.h vd, j, uk3` instruction. */
5308
+static void __attribute__((unused))
5309
+tcg_out_opc_vinsgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk3)
5310
+{
5311
+ tcg_out32(s, encode_vdjuk3_insn(OPC_VINSGR2VR_H, vd, j, uk3));
5312
+}
5313
+
5314
+/* Emits the `vinsgr2vr.w vd, j, uk2` instruction. */
5315
+static void __attribute__((unused))
5316
+tcg_out_opc_vinsgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk2)
5317
+{
5318
+ tcg_out32(s, encode_vdjuk2_insn(OPC_VINSGR2VR_W, vd, j, uk2));
5319
+}
5320
+
5321
+/* Emits the `vinsgr2vr.d vd, j, uk1` instruction. */
5322
+static void __attribute__((unused))
5323
+tcg_out_opc_vinsgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk1)
5324
+{
5325
+ tcg_out32(s, encode_vdjuk1_insn(OPC_VINSGR2VR_D, vd, j, uk1));
5326
+}
5327
+
5328
+/* Emits the `vpickve2gr.b d, vj, uk4` instruction. */
5329
+static void __attribute__((unused))
5330
+tcg_out_opc_vpickve2gr_b(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
5331
+{
5332
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_B, d, vj, uk4));
5333
+}
5334
+
5335
+/* Emits the `vpickve2gr.h d, vj, uk3` instruction. */
5336
+static void __attribute__((unused))
5337
+tcg_out_opc_vpickve2gr_h(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
5338
+{
5339
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_H, d, vj, uk3));
5340
+}
5341
+
5342
+/* Emits the `vpickve2gr.w d, vj, uk2` instruction. */
5343
+static void __attribute__((unused))
5344
+tcg_out_opc_vpickve2gr_w(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
5345
+{
5346
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_W, d, vj, uk2));
5347
+}
5348
+
5349
+/* Emits the `vpickve2gr.d d, vj, uk1` instruction. */
5350
+static void __attribute__((unused))
5351
+tcg_out_opc_vpickve2gr_d(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
5352
+{
5353
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_D, d, vj, uk1));
5354
+}
5355
+
5356
+/* Emits the `vpickve2gr.bu d, vj, uk4` instruction. */
5357
+static void __attribute__((unused))
5358
+tcg_out_opc_vpickve2gr_bu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
5359
+{
5360
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_BU, d, vj, uk4));
5361
+}
5362
+
5363
+/* Emits the `vpickve2gr.hu d, vj, uk3` instruction. */
5364
+static void __attribute__((unused))
5365
+tcg_out_opc_vpickve2gr_hu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
5366
+{
5367
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_HU, d, vj, uk3));
5368
+}
5369
+
5370
+/* Emits the `vpickve2gr.wu d, vj, uk2` instruction. */
5371
+static void __attribute__((unused))
5372
+tcg_out_opc_vpickve2gr_wu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
5373
+{
5374
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_WU, d, vj, uk2));
5375
+}
5376
+
5377
+/* Emits the `vpickve2gr.du d, vj, uk1` instruction. */
5378
+static void __attribute__((unused))
5379
+tcg_out_opc_vpickve2gr_du(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
5380
+{
5381
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_DU, d, vj, uk1));
5382
+}
5383
+
5384
+/* Emits the `vreplvei.b vd, vj, uk4` instruction. */
5385
+static void __attribute__((unused))
5386
+tcg_out_opc_vreplvei_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5387
+{
5388
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VREPLVEI_B, vd, vj, uk4));
5389
+}
5390
+
5391
+/* Emits the `vreplvei.h vd, vj, uk3` instruction. */
5392
+static void __attribute__((unused))
5393
+tcg_out_opc_vreplvei_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5394
+{
5395
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VREPLVEI_H, vd, vj, uk3));
5396
+}
5397
+
5398
+/* Emits the `vreplvei.w vd, vj, uk2` instruction. */
5399
+static void __attribute__((unused))
5400
+tcg_out_opc_vreplvei_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk2)
5401
+{
5402
+ tcg_out32(s, encode_vdvjuk2_insn(OPC_VREPLVEI_W, vd, vj, uk2));
5403
+}
5404
+
5405
+/* Emits the `vreplvei.d vd, vj, uk1` instruction. */
5406
+static void __attribute__((unused))
5407
+tcg_out_opc_vreplvei_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk1)
5408
+{
5409
+ tcg_out32(s, encode_vdvjuk1_insn(OPC_VREPLVEI_D, vd, vj, uk1));
5410
+}
5411
+
5412
+/* Emits the `vsllwil.h.b vd, vj, uk3` instruction. */
5413
+static void __attribute__((unused))
5414
+tcg_out_opc_vsllwil_h_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5415
+{
5416
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_H_B, vd, vj, uk3));
5417
+}
5418
+
5419
+/* Emits the `vsllwil.w.h vd, vj, uk4` instruction. */
5420
+static void __attribute__((unused))
5421
+tcg_out_opc_vsllwil_w_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5422
+{
5423
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_W_H, vd, vj, uk4));
5424
+}
5425
+
5426
+/* Emits the `vsllwil.d.w vd, vj, uk5` instruction. */
5427
+static void __attribute__((unused))
5428
+tcg_out_opc_vsllwil_d_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5429
+{
5430
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_D_W, vd, vj, uk5));
5431
+}
5432
+
5433
+/* Emits the `vextl.q.d vd, vj` instruction. */
5434
+static void __attribute__((unused))
5435
+tcg_out_opc_vextl_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
5436
+{
5437
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_Q_D, vd, vj));
5438
+}
5439
+
5440
+/* Emits the `vsllwil.hu.bu vd, vj, uk3` instruction. */
5441
+static void __attribute__((unused))
5442
+tcg_out_opc_vsllwil_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5443
+{
5444
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_HU_BU, vd, vj, uk3));
5445
+}
5446
+
5447
+/* Emits the `vsllwil.wu.hu vd, vj, uk4` instruction. */
5448
+static void __attribute__((unused))
5449
+tcg_out_opc_vsllwil_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5450
+{
5451
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_WU_HU, vd, vj, uk4));
5452
+}
5453
+
5454
+/* Emits the `vsllwil.du.wu vd, vj, uk5` instruction. */
5455
+static void __attribute__((unused))
5456
+tcg_out_opc_vsllwil_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5457
+{
5458
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_DU_WU, vd, vj, uk5));
5459
+}
5460
+
5461
+/* Emits the `vextl.qu.du vd, vj` instruction. */
5462
+static void __attribute__((unused))
5463
+tcg_out_opc_vextl_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
5464
+{
5465
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_QU_DU, vd, vj));
5466
+}
5467
+
5468
+/* Emits the `vbitclri.b vd, vj, uk3` instruction. */
5469
+static void __attribute__((unused))
5470
+tcg_out_opc_vbitclri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5471
+{
5472
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITCLRI_B, vd, vj, uk3));
5473
+}
5474
+
5475
+/* Emits the `vbitclri.h vd, vj, uk4` instruction. */
5476
+static void __attribute__((unused))
5477
+tcg_out_opc_vbitclri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5478
+{
5479
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITCLRI_H, vd, vj, uk4));
5480
+}
5481
+
5482
+/* Emits the `vbitclri.w vd, vj, uk5` instruction. */
5483
+static void __attribute__((unused))
5484
+tcg_out_opc_vbitclri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5485
+{
5486
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITCLRI_W, vd, vj, uk5));
5487
+}
5488
+
5489
+/* Emits the `vbitclri.d vd, vj, uk6` instruction. */
5490
+static void __attribute__((unused))
5491
+tcg_out_opc_vbitclri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5492
+{
5493
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITCLRI_D, vd, vj, uk6));
5494
+}
5495
+
5496
+/* Emits the `vbitseti.b vd, vj, uk3` instruction. */
5497
+static void __attribute__((unused))
5498
+tcg_out_opc_vbitseti_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5499
+{
5500
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITSETI_B, vd, vj, uk3));
5501
+}
5502
+
5503
+/* Emits the `vbitseti.h vd, vj, uk4` instruction. */
5504
+static void __attribute__((unused))
5505
+tcg_out_opc_vbitseti_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5506
+{
5507
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITSETI_H, vd, vj, uk4));
5508
+}
5509
+
5510
+/* Emits the `vbitseti.w vd, vj, uk5` instruction. */
5511
+static void __attribute__((unused))
5512
+tcg_out_opc_vbitseti_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5513
+{
5514
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITSETI_W, vd, vj, uk5));
5515
+}
5516
+
5517
+/* Emits the `vbitseti.d vd, vj, uk6` instruction. */
5518
+static void __attribute__((unused))
5519
+tcg_out_opc_vbitseti_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5520
+{
5521
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITSETI_D, vd, vj, uk6));
5522
+}
5523
+
5524
+/* Emits the `vbitrevi.b vd, vj, uk3` instruction. */
5525
+static void __attribute__((unused))
5526
+tcg_out_opc_vbitrevi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5527
+{
5528
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITREVI_B, vd, vj, uk3));
5529
+}
5530
+
5531
+/* Emits the `vbitrevi.h vd, vj, uk4` instruction. */
5532
+static void __attribute__((unused))
5533
+tcg_out_opc_vbitrevi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5534
+{
5535
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITREVI_H, vd, vj, uk4));
5536
+}
5537
+
5538
+/* Emits the `vbitrevi.w vd, vj, uk5` instruction. */
5539
+static void __attribute__((unused))
5540
+tcg_out_opc_vbitrevi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5541
+{
5542
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITREVI_W, vd, vj, uk5));
5543
+}
5544
+
5545
+/* Emits the `vbitrevi.d vd, vj, uk6` instruction. */
5546
+static void __attribute__((unused))
5547
+tcg_out_opc_vbitrevi_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5548
+{
5549
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITREVI_D, vd, vj, uk6));
5550
+}
5551
+
5552
+/* Emits the `vsat.b vd, vj, uk3` instruction. */
5553
+static void __attribute__((unused))
5554
+tcg_out_opc_vsat_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5555
+{
5556
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_B, vd, vj, uk3));
5557
+}
5558
+
5559
+/* Emits the `vsat.h vd, vj, uk4` instruction. */
5560
+static void __attribute__((unused))
5561
+tcg_out_opc_vsat_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5562
+{
5563
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_H, vd, vj, uk4));
5564
+}
5565
+
5566
+/* Emits the `vsat.w vd, vj, uk5` instruction. */
5567
+static void __attribute__((unused))
5568
+tcg_out_opc_vsat_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5569
+{
5570
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_W, vd, vj, uk5));
5571
+}
5572
+
5573
+/* Emits the `vsat.d vd, vj, uk6` instruction. */
5574
+static void __attribute__((unused))
5575
+tcg_out_opc_vsat_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5576
+{
5577
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_D, vd, vj, uk6));
5578
+}
5579
+
5580
+/* Emits the `vsat.bu vd, vj, uk3` instruction. */
5581
+static void __attribute__((unused))
5582
+tcg_out_opc_vsat_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5583
+{
5584
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_BU, vd, vj, uk3));
5585
+}
5586
+
5587
+/* Emits the `vsat.hu vd, vj, uk4` instruction. */
5588
+static void __attribute__((unused))
5589
+tcg_out_opc_vsat_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5590
+{
5591
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_HU, vd, vj, uk4));
5592
+}
5593
+
5594
+/* Emits the `vsat.wu vd, vj, uk5` instruction. */
5595
+static void __attribute__((unused))
5596
+tcg_out_opc_vsat_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5597
+{
5598
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_WU, vd, vj, uk5));
5599
+}
5600
+
5601
+/* Emits the `vsat.du vd, vj, uk6` instruction. */
5602
+static void __attribute__((unused))
5603
+tcg_out_opc_vsat_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5604
+{
5605
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_DU, vd, vj, uk6));
5606
+}
5607
+
5608
+/* Emits the `vslli.b vd, vj, uk3` instruction. */
5609
+static void __attribute__((unused))
5610
+tcg_out_opc_vslli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5611
+{
5612
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLI_B, vd, vj, uk3));
5613
+}
5614
+
5615
+/* Emits the `vslli.h vd, vj, uk4` instruction. */
5616
+static void __attribute__((unused))
5617
+tcg_out_opc_vslli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5618
+{
5619
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLI_H, vd, vj, uk4));
5620
+}
5621
+
5622
+/* Emits the `vslli.w vd, vj, uk5` instruction. */
5623
+static void __attribute__((unused))
5624
+tcg_out_opc_vslli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5625
+{
5626
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLI_W, vd, vj, uk5));
5627
+}
5628
+
5629
+/* Emits the `vslli.d vd, vj, uk6` instruction. */
5630
+static void __attribute__((unused))
5631
+tcg_out_opc_vslli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5632
+{
5633
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSLLI_D, vd, vj, uk6));
5634
+}
5635
+
5636
+/* Emits the `vsrli.b vd, vj, uk3` instruction. */
5637
+static void __attribute__((unused))
5638
+tcg_out_opc_vsrli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5639
+{
5640
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLI_B, vd, vj, uk3));
5641
+}
5642
+
5643
+/* Emits the `vsrli.h vd, vj, uk4` instruction. */
5644
+static void __attribute__((unused))
5645
+tcg_out_opc_vsrli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5646
+{
5647
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLI_H, vd, vj, uk4));
5648
+}
5649
+
5650
+/* Emits the `vsrli.w vd, vj, uk5` instruction. */
5651
+static void __attribute__((unused))
5652
+tcg_out_opc_vsrli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5653
+{
5654
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLI_W, vd, vj, uk5));
5655
+}
5656
+
5657
+/* Emits the `vsrli.d vd, vj, uk6` instruction. */
5658
+static void __attribute__((unused))
5659
+tcg_out_opc_vsrli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5660
+{
5661
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLI_D, vd, vj, uk6));
5662
+}
5663
+
5664
+/* Emits the `vsrai.b vd, vj, uk3` instruction. */
5665
+static void __attribute__((unused))
5666
+tcg_out_opc_vsrai_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5667
+{
5668
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRAI_B, vd, vj, uk3));
5669
+}
5670
+
5671
+/* Emits the `vsrai.h vd, vj, uk4` instruction. */
5672
+static void __attribute__((unused))
5673
+tcg_out_opc_vsrai_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5674
+{
5675
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRAI_H, vd, vj, uk4));
5676
+}
5677
+
5678
+/* Emits the `vsrai.w vd, vj, uk5` instruction. */
5679
+static void __attribute__((unused))
5680
+tcg_out_opc_vsrai_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5681
+{
5682
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRAI_W, vd, vj, uk5));
5683
+}
5684
+
5685
+/* Emits the `vsrai.d vd, vj, uk6` instruction. */
5686
+static void __attribute__((unused))
5687
+tcg_out_opc_vsrai_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5688
+{
5689
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRAI_D, vd, vj, uk6));
5690
+}
5691
+
5692
+/* Emits the `vsrlni.b.h vd, vj, uk4` instruction. */
5693
+static void __attribute__((unused))
5694
+tcg_out_opc_vsrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5695
+{
5696
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLNI_B_H, vd, vj, uk4));
5697
+}
5698
+
5699
+/* Emits the `vsrlni.h.w vd, vj, uk5` instruction. */
5700
+static void __attribute__((unused))
5701
+tcg_out_opc_vsrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5702
+{
5703
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLNI_H_W, vd, vj, uk5));
5704
+}
5705
+
5706
+/* Emits the `vsrlni.w.d vd, vj, uk6` instruction. */
5707
+static void __attribute__((unused))
5708
+tcg_out_opc_vsrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5709
+{
5710
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLNI_W_D, vd, vj, uk6));
5711
+}
5712
+
5713
+/* Emits the `vsrlni.d.q vd, vj, uk7` instruction. */
5714
+static void __attribute__((unused))
5715
+tcg_out_opc_vsrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5716
+{
5717
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLNI_D_Q, vd, vj, uk7));
5718
+}
5719
+
5720
+/* Emits the `vsrlrni.b.h vd, vj, uk4` instruction. */
5721
+static void __attribute__((unused))
5722
+tcg_out_opc_vsrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5723
+{
5724
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRNI_B_H, vd, vj, uk4));
5725
+}
5726
+
5727
+/* Emits the `vsrlrni.h.w vd, vj, uk5` instruction. */
5728
+static void __attribute__((unused))
5729
+tcg_out_opc_vsrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5730
+{
5731
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRNI_H_W, vd, vj, uk5));
5732
+}
5733
+
5734
+/* Emits the `vsrlrni.w.d vd, vj, uk6` instruction. */
5735
+static void __attribute__((unused))
5736
+tcg_out_opc_vsrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5737
+{
5738
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRNI_W_D, vd, vj, uk6));
5739
+}
5740
+
5741
+/* Emits the `vsrlrni.d.q vd, vj, uk7` instruction. */
5742
+static void __attribute__((unused))
5743
+tcg_out_opc_vsrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5744
+{
5745
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLRNI_D_Q, vd, vj, uk7));
5746
+}
5747
+
5748
+/* Emits the `vssrlni.b.h vd, vj, uk4` instruction. */
5749
+static void __attribute__((unused))
5750
+tcg_out_opc_vssrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5751
+{
5752
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_B_H, vd, vj, uk4));
5753
+}
5754
+
5755
+/* Emits the `vssrlni.h.w vd, vj, uk5` instruction. */
5756
+static void __attribute__((unused))
5757
+tcg_out_opc_vssrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5758
+{
5759
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_H_W, vd, vj, uk5));
5760
+}
5761
+
5762
+/* Emits the `vssrlni.w.d vd, vj, uk6` instruction. */
5763
+static void __attribute__((unused))
5764
+tcg_out_opc_vssrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5765
+{
5766
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_W_D, vd, vj, uk6));
5767
+}
5768
+
5769
+/* Emits the `vssrlni.d.q vd, vj, uk7` instruction. */
5770
+static void __attribute__((unused))
5771
+tcg_out_opc_vssrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5772
+{
5773
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_D_Q, vd, vj, uk7));
5774
+}
5775
+
5776
+/* Emits the `vssrlni.bu.h vd, vj, uk4` instruction. */
5777
+static void __attribute__((unused))
5778
+tcg_out_opc_vssrlni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5779
+{
5780
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_BU_H, vd, vj, uk4));
5781
+}
5782
+
5783
+/* Emits the `vssrlni.hu.w vd, vj, uk5` instruction. */
5784
+static void __attribute__((unused))
5785
+tcg_out_opc_vssrlni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5786
+{
5787
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_HU_W, vd, vj, uk5));
5788
+}
5789
+
5790
+/* Emits the `vssrlni.wu.d vd, vj, uk6` instruction. */
5791
+static void __attribute__((unused))
5792
+tcg_out_opc_vssrlni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5793
+{
5794
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_WU_D, vd, vj, uk6));
5795
+}
5796
+
5797
+/* Emits the `vssrlni.du.q vd, vj, uk7` instruction. */
5798
+static void __attribute__((unused))
5799
+tcg_out_opc_vssrlni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5800
+{
5801
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_DU_Q, vd, vj, uk7));
5802
+}
5803
+
5804
+/* Emits the `vssrlrni.b.h vd, vj, uk4` instruction. */
5805
+static void __attribute__((unused))
5806
+tcg_out_opc_vssrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5807
+{
5808
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_B_H, vd, vj, uk4));
5809
+}
5810
+
5811
+/* Emits the `vssrlrni.h.w vd, vj, uk5` instruction. */
5812
+static void __attribute__((unused))
5813
+tcg_out_opc_vssrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5814
+{
5815
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_H_W, vd, vj, uk5));
5816
+}
5817
+
5818
+/* Emits the `vssrlrni.w.d vd, vj, uk6` instruction. */
5819
+static void __attribute__((unused))
5820
+tcg_out_opc_vssrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5821
+{
5822
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_W_D, vd, vj, uk6));
5823
+}
5824
+
5825
+/* Emits the `vssrlrni.d.q vd, vj, uk7` instruction. */
5826
+static void __attribute__((unused))
5827
+tcg_out_opc_vssrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5828
+{
5829
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_D_Q, vd, vj, uk7));
5830
+}
5831
+
5832
+/* Emits the `vssrlrni.bu.h vd, vj, uk4` instruction. */
5833
+static void __attribute__((unused))
5834
+tcg_out_opc_vssrlrni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5835
+{
5836
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_BU_H, vd, vj, uk4));
5837
+}
5838
+
5839
+/* Emits the `vssrlrni.hu.w vd, vj, uk5` instruction. */
5840
+static void __attribute__((unused))
5841
+tcg_out_opc_vssrlrni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5842
+{
5843
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_HU_W, vd, vj, uk5));
5844
+}
5845
+
5846
+/* Emits the `vssrlrni.wu.d vd, vj, uk6` instruction. */
5847
+static void __attribute__((unused))
5848
+tcg_out_opc_vssrlrni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5849
+{
5850
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_WU_D, vd, vj, uk6));
5851
+}
5852
+
5853
+/* Emits the `vssrlrni.du.q vd, vj, uk7` instruction. */
5854
+static void __attribute__((unused))
5855
+tcg_out_opc_vssrlrni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5856
+{
5857
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_DU_Q, vd, vj, uk7));
5858
+}
5859
+
5860
+/* Emits the `vsrani.b.h vd, vj, uk4` instruction. */
5861
+static void __attribute__((unused))
5862
+tcg_out_opc_vsrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5863
+{
5864
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRANI_B_H, vd, vj, uk4));
5865
+}
5866
+
5867
+/* Emits the `vsrani.h.w vd, vj, uk5` instruction. */
5868
+static void __attribute__((unused))
5869
+tcg_out_opc_vsrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5870
+{
5871
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRANI_H_W, vd, vj, uk5));
5872
+}
5873
+
5874
+/* Emits the `vsrani.w.d vd, vj, uk6` instruction. */
5875
+static void __attribute__((unused))
5876
+tcg_out_opc_vsrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5877
+{
5878
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRANI_W_D, vd, vj, uk6));
5879
+}
5880
+
5881
+/* Emits the `vsrani.d.q vd, vj, uk7` instruction. */
5882
+static void __attribute__((unused))
5883
+tcg_out_opc_vsrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5884
+{
5885
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRANI_D_Q, vd, vj, uk7));
5886
+}
5887
+
5888
+/* Emits the `vsrarni.b.h vd, vj, uk4` instruction. */
5889
+static void __attribute__((unused))
5890
+tcg_out_opc_vsrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5891
+{
5892
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARNI_B_H, vd, vj, uk4));
5893
+}
5894
+
5895
+/* Emits the `vsrarni.h.w vd, vj, uk5` instruction. */
5896
+static void __attribute__((unused))
5897
+tcg_out_opc_vsrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5898
+{
5899
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARNI_H_W, vd, vj, uk5));
5900
+}
5901
+
5902
+/* Emits the `vsrarni.w.d vd, vj, uk6` instruction. */
5903
+static void __attribute__((unused))
5904
+tcg_out_opc_vsrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5905
+{
5906
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARNI_W_D, vd, vj, uk6));
5907
+}
5908
+
5909
+/* Emits the `vsrarni.d.q vd, vj, uk7` instruction. */
5910
+static void __attribute__((unused))
5911
+tcg_out_opc_vsrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5912
+{
5913
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRARNI_D_Q, vd, vj, uk7));
5914
+}
5915
+
5916
+/* Emits the `vssrani.b.h vd, vj, uk4` instruction. */
5917
+static void __attribute__((unused))
5918
+tcg_out_opc_vssrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5919
+{
5920
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_B_H, vd, vj, uk4));
5921
+}
5922
+
5923
+/* Emits the `vssrani.h.w vd, vj, uk5` instruction. */
5924
+static void __attribute__((unused))
5925
+tcg_out_opc_vssrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5926
+{
5927
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_H_W, vd, vj, uk5));
5928
+}
5929
+
5930
+/* Emits the `vssrani.w.d vd, vj, uk6` instruction. */
5931
+static void __attribute__((unused))
5932
+tcg_out_opc_vssrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5933
+{
5934
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_W_D, vd, vj, uk6));
5935
+}
5936
+
5937
+/* Emits the `vssrani.d.q vd, vj, uk7` instruction. */
5938
+static void __attribute__((unused))
5939
+tcg_out_opc_vssrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5940
+{
5941
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_D_Q, vd, vj, uk7));
5942
+}
5943
+
5944
+/* Emits the `vssrani.bu.h vd, vj, uk4` instruction. */
5945
+static void __attribute__((unused))
5946
+tcg_out_opc_vssrani_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5947
+{
5948
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_BU_H, vd, vj, uk4));
5949
+}
5950
+
5951
+/* Emits the `vssrani.hu.w vd, vj, uk5` instruction. */
5952
+static void __attribute__((unused))
5953
+tcg_out_opc_vssrani_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5954
+{
5955
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_HU_W, vd, vj, uk5));
5956
+}
5957
+
5958
+/* Emits the `vssrani.wu.d vd, vj, uk6` instruction. */
5959
+static void __attribute__((unused))
5960
+tcg_out_opc_vssrani_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5961
+{
5962
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_WU_D, vd, vj, uk6));
5963
+}
5964
+
5965
+/* Emits the `vssrani.du.q vd, vj, uk7` instruction. */
5966
+static void __attribute__((unused))
5967
+tcg_out_opc_vssrani_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5968
+{
5969
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_DU_Q, vd, vj, uk7));
5970
+}
5971
+
5972
+/* Emits the `vssrarni.b.h vd, vj, uk4` instruction. */
5973
+static void __attribute__((unused))
5974
+tcg_out_opc_vssrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5975
+{
5976
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_B_H, vd, vj, uk4));
5977
+}
5978
+
5979
+/* Emits the `vssrarni.h.w vd, vj, uk5` instruction. */
5980
+static void __attribute__((unused))
5981
+tcg_out_opc_vssrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5982
+{
5983
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_H_W, vd, vj, uk5));
5984
+}
5985
+
5986
+/* Emits the `vssrarni.w.d vd, vj, uk6` instruction. */
5987
+static void __attribute__((unused))
5988
+tcg_out_opc_vssrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5989
+{
5990
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_W_D, vd, vj, uk6));
5991
+}
5992
+
5993
+/* Emits the `vssrarni.d.q vd, vj, uk7` instruction. */
5994
+static void __attribute__((unused))
5995
+tcg_out_opc_vssrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5996
+{
5997
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_D_Q, vd, vj, uk7));
5998
+}
5999
+
6000
+/* Emits the `vssrarni.bu.h vd, vj, uk4` instruction. */
6001
+static void __attribute__((unused))
6002
+tcg_out_opc_vssrarni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
6003
+{
6004
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_BU_H, vd, vj, uk4));
6005
+}
6006
+
6007
+/* Emits the `vssrarni.hu.w vd, vj, uk5` instruction. */
6008
+static void __attribute__((unused))
6009
+tcg_out_opc_vssrarni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
6010
+{
6011
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_HU_W, vd, vj, uk5));
6012
+}
6013
+
6014
+/* Emits the `vssrarni.wu.d vd, vj, uk6` instruction. */
6015
+static void __attribute__((unused))
6016
+tcg_out_opc_vssrarni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
6017
+{
6018
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_WU_D, vd, vj, uk6));
6019
+}
6020
+
6021
+/* Emits the `vssrarni.du.q vd, vj, uk7` instruction. */
6022
+static void __attribute__((unused))
6023
+tcg_out_opc_vssrarni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
6024
+{
6025
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_DU_Q, vd, vj, uk7));
6026
+}
6027
+
6028
+/* Emits the `vextrins.d vd, vj, uk8` instruction. */
6029
+static void __attribute__((unused))
6030
+tcg_out_opc_vextrins_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6031
+{
6032
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_D, vd, vj, uk8));
6033
+}
6034
+
6035
+/* Emits the `vextrins.w vd, vj, uk8` instruction. */
6036
+static void __attribute__((unused))
6037
+tcg_out_opc_vextrins_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6038
+{
6039
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_W, vd, vj, uk8));
6040
+}
6041
+
6042
+/* Emits the `vextrins.h vd, vj, uk8` instruction. */
6043
+static void __attribute__((unused))
6044
+tcg_out_opc_vextrins_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6045
+{
6046
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_H, vd, vj, uk8));
6047
+}
6048
+
6049
+/* Emits the `vextrins.b vd, vj, uk8` instruction. */
6050
+static void __attribute__((unused))
6051
+tcg_out_opc_vextrins_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6052
+{
6053
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_B, vd, vj, uk8));
6054
+}
6055
+
6056
+/* Emits the `vshuf4i.b vd, vj, uk8` instruction. */
6057
+static void __attribute__((unused))
6058
+tcg_out_opc_vshuf4i_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6059
+{
6060
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_B, vd, vj, uk8));
6061
+}
6062
+
6063
+/* Emits the `vshuf4i.h vd, vj, uk8` instruction. */
6064
+static void __attribute__((unused))
6065
+tcg_out_opc_vshuf4i_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6066
+{
6067
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_H, vd, vj, uk8));
6068
+}
6069
+
6070
+/* Emits the `vshuf4i.w vd, vj, uk8` instruction. */
6071
+static void __attribute__((unused))
6072
+tcg_out_opc_vshuf4i_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6073
+{
6074
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_W, vd, vj, uk8));
6075
+}
6076
+
6077
+/* Emits the `vshuf4i.d vd, vj, uk8` instruction. */
6078
+static void __attribute__((unused))
6079
+tcg_out_opc_vshuf4i_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6080
+{
6081
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_D, vd, vj, uk8));
6082
+}
6083
+
6084
+/* Emits the `vbitseli.b vd, vj, uk8` instruction. */
6085
+static void __attribute__((unused))
6086
+tcg_out_opc_vbitseli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6087
+{
6088
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VBITSELI_B, vd, vj, uk8));
6089
+}
6090
+
6091
+/* Emits the `vandi.b vd, vj, uk8` instruction. */
6092
+static void __attribute__((unused))
6093
+tcg_out_opc_vandi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6094
+{
6095
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VANDI_B, vd, vj, uk8));
6096
+}
6097
+
6098
+/* Emits the `vori.b vd, vj, uk8` instruction. */
6099
+static void __attribute__((unused))
6100
+tcg_out_opc_vori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6101
+{
6102
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VORI_B, vd, vj, uk8));
6103
+}
6104
+
6105
+/* Emits the `vxori.b vd, vj, uk8` instruction. */
6106
+static void __attribute__((unused))
6107
+tcg_out_opc_vxori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6108
+{
6109
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VXORI_B, vd, vj, uk8));
6110
+}
6111
+
6112
+/* Emits the `vnori.b vd, vj, uk8` instruction. */
6113
+static void __attribute__((unused))
6114
+tcg_out_opc_vnori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6115
+{
6116
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VNORI_B, vd, vj, uk8));
6117
+}
6118
+
6119
+/* Emits the `vldi vd, sj13` instruction. */
6120
+static void __attribute__((unused))
6121
+tcg_out_opc_vldi(TCGContext *s, TCGReg vd, int32_t sj13)
6122
+{
6123
+ tcg_out32(s, encode_vdsj13_insn(OPC_VLDI, vd, sj13));
6124
+}
6125
+
6126
+/* Emits the `vpermi.w vd, vj, uk8` instruction. */
6127
+static void __attribute__((unused))
6128
+tcg_out_opc_vpermi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6129
+{
6130
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VPERMI_W, vd, vj, uk8));
6131
+}
6132
+
6133
/* End of generated code. */
6134
--
55
--
6135
2.34.1
56
2.43.0
1
This hook may emit code at the beginning of the TB.
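A backend that needs a landing pad at TB entry can override the stub;
for instance, with branch-target identification every TB entry is an
indirect-branch target.  A rough sketch (illustrative only; the literal
"bti c" encoding used here is an assumption, not part of this patch):

    static void tcg_out_tb_start(TCGContext *s)
    {
        /* Mark the start of the TB as a valid indirect-branch target. */
        tcg_out32(s, 0xd503245f);    /* hint #34: bti c */
    }

The default implementations added below do nothing.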
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
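A rough usage sketch (hypothetical caller, not part of this patch; it
assumes only the helpers below, the existing arg_info(), and an op for
which "x op 0" is just "x"):

    TempOptInfo *t2 = arg_info(op->args[2]);

    if (ti_is_const_val(t2, 0)) {
        /* Second input is known to be constant zero: copy input 1. */
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    if (ti_is_const(t2)) {
        uint64_t v2 = ti_const_val(t2);
        /* ... fold against the known constant value v2 ... */
    }

Later patches in the series convert the fold_* routines to this style.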
2
2
3
Suggested-by: Jordan Niethe <jniethe5@gmail.com>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
tcg/tcg.c | 3 +++
5
tcg/optimize.c | 20 +++++++++++++++++---
8
tcg/aarch64/tcg-target.c.inc | 5 +++++
6
1 file changed, 17 insertions(+), 3 deletions(-)
9
tcg/arm/tcg-target.c.inc | 5 +++++
10
tcg/i386/tcg-target.c.inc | 5 +++++
11
tcg/loongarch64/tcg-target.c.inc | 5 +++++
12
tcg/mips/tcg-target.c.inc | 5 +++++
13
tcg/ppc/tcg-target.c.inc | 5 +++++
14
tcg/riscv/tcg-target.c.inc | 5 +++++
15
tcg/s390x/tcg-target.c.inc | 5 +++++
16
tcg/sparc64/tcg-target.c.inc | 5 +++++
17
tcg/tci/tcg-target.c.inc | 5 +++++
18
11 files changed, 53 insertions(+)
19
7
20
diff --git a/tcg/tcg.c b/tcg/tcg.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
21
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/tcg.c
10
--- a/tcg/optimize.c
23
+++ b/tcg/tcg.c
11
+++ b/tcg/optimize.c
24
@@ -XXX,XX +XXX,XX @@ static void tcg_register_jit_int(const void *buf, size_t size,
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
25
__attribute__((unused));
13
return ts_info(arg_temp(arg));
26
27
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
28
+static void tcg_out_tb_start(TCGContext *s);
29
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
30
intptr_t arg2);
31
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
32
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
33
s->gen_insn_data =
34
tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
35
36
+ tcg_out_tb_start(s);
37
+
38
num_insns = -1;
39
QTAILQ_FOREACH(op, &s->ops, link) {
40
TCGOpcode opc = op->opc;
41
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/aarch64/tcg-target.c.inc
44
+++ b/tcg/aarch64/tcg-target.c.inc
45
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
46
tcg_out_insn(s, 3207, RET, TCG_REG_LR);
47
}
14
}
48
15
49
+static void tcg_out_tb_start(TCGContext *s)
16
+static inline bool ti_is_const(TempOptInfo *ti)
50
+{
17
+{
51
+ /* nothing to do */
18
+ return ti->is_const;
52
+}
19
+}
53
+
20
+
54
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
55
{
56
int i;
57
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
58
index XXXXXXX..XXXXXXX 100644
59
--- a/tcg/arm/tcg-target.c.inc
60
+++ b/tcg/arm/tcg-target.c.inc
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_epilogue(TCGContext *s)
62
(1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
63
}
64
65
+static void tcg_out_tb_start(TCGContext *s)
66
+{
22
+{
67
+ /* nothing to do */
23
+ return ti->val;
68
+}
24
+}
69
+
25
+
70
typedef struct {
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
71
DebugFrameHeader h;
72
uint8_t fde_def_cfa[4];
73
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/i386/tcg-target.c.inc
76
+++ b/tcg/i386/tcg-target.c.inc
77
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
78
tcg_out_opc(s, OPC_RET, 0, 0, 0);
79
}
80
81
+static void tcg_out_tb_start(TCGContext *s)
82
+{
27
+{
83
+ /* nothing to do */
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
84
+}
29
+}
85
+
30
+
86
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
31
static inline bool ts_is_const(TCGTemp *ts)
87
{
32
{
88
memset(p, 0x90, count);
33
- return ts_info(ts)->is_const;
89
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
34
+ return ti_is_const(ts_info(ts));
90
index XXXXXXX..XXXXXXX 100644
91
--- a/tcg/loongarch64/tcg-target.c.inc
92
+++ b/tcg/loongarch64/tcg-target.c.inc
93
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
94
tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
95
}
35
}
96
36
97
+static void tcg_out_tb_start(TCGContext *s)
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
98
+{
99
+ /* nothing to do */
100
+}
101
+
102
static void tcg_target_init(TCGContext *s)
103
{
38
{
104
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
39
- TempOptInfo *ti = ts_info(ts);
105
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
40
- return ti->is_const && ti->val == val;
106
index XXXXXXX..XXXXXXX 100644
41
+ return ti_is_const_val(ts_info(ts), val);
107
--- a/tcg/mips/tcg-target.c.inc
108
+++ b/tcg/mips/tcg-target.c.inc
109
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
110
tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
111
}
42
}
112
43
113
+static void tcg_out_tb_start(TCGContext *s)
44
static inline bool arg_is_const(TCGArg arg)
114
+{
115
+ /* nothing to do */
116
+}
117
+
118
static void tcg_target_init(TCGContext *s)
119
{
120
tcg_target_detect_isa();
121
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
122
index XXXXXXX..XXXXXXX 100644
123
--- a/tcg/ppc/tcg-target.c.inc
124
+++ b/tcg/ppc/tcg-target.c.inc
125
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
126
tcg_out32(s, BCLR | BO_ALWAYS);
127
}
128
129
+static void tcg_out_tb_start(TCGContext *s)
130
+{
131
+ /* nothing to do */
132
+}
133
+
134
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
135
{
136
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
137
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tcg/riscv/tcg-target.c.inc
140
+++ b/tcg/riscv/tcg-target.c.inc
141
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
142
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
143
}
144
145
+static void tcg_out_tb_start(TCGContext *s)
146
+{
147
+ /* nothing to do */
148
+}
149
+
150
static volatile sig_atomic_t got_sigill;
151
152
static void sigill_handler(int signo, siginfo_t *si, void *data)
153
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
154
index XXXXXXX..XXXXXXX 100644
155
--- a/tcg/s390x/tcg-target.c.inc
156
+++ b/tcg/s390x/tcg-target.c.inc
157
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
158
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
159
}
160
161
+static void tcg_out_tb_start(TCGContext *s)
162
+{
163
+ /* nothing to do */
164
+}
165
+
166
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
167
{
168
memset(p, 0x07, count * sizeof(tcg_insn_unit));
169
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
170
index XXXXXXX..XXXXXXX 100644
171
--- a/tcg/sparc64/tcg-target.c.inc
172
+++ b/tcg/sparc64/tcg-target.c.inc
173
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
174
tcg_out_movi_s13(s, TCG_REG_O0, 0);
175
}
176
177
+static void tcg_out_tb_start(TCGContext *s)
178
+{
179
+ /* nothing to do */
180
+}
181
+
182
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
183
{
184
int i;
185
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
186
index XXXXXXX..XXXXXXX 100644
187
--- a/tcg/tci/tcg-target.c.inc
188
+++ b/tcg/tci/tcg-target.c.inc
189
@@ -XXX,XX +XXX,XX @@ static inline void tcg_target_qemu_prologue(TCGContext *s)
190
{
191
}
192
193
+static void tcg_out_tb_start(TCGContext *s)
194
+{
195
+ /* nothing to do */
196
+}
197
+
198
bool tcg_target_has_memory_bswap(MemOp memop)
199
{
200
return true;
201
--
45
--
202
2.34.1
46
2.43.0
203
204
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink mask computation below fold_affected_mask early exit.
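Worked example (numbers invented for this note): if z_mask(t1) = 0x00ff
and z_mask(t2) = 0x0f0f, the AND can only produce bits in
0x00ff & 0x0f0f = 0x000f.  And because the z_mask of a constant is the
value itself, any bit of t1 outside a constant t2 -- i.e. z1 & ~z2 --
cannot influence the result, which is exactly what the
fold_affected_mask() test below keys on.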
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of second const operand.
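Worked example (value invented for this note): for andc(x, c) with
constant c = 0x0000ffff the result is x & ~c, so the bits of x that
cannot influence it are z_mask & c and the remaining result mask is
z_mask & ~c.  The old code derived both from z2 = ~c and then negated
z2 again for the affected-bits test; using the constant value v2
directly avoids that round trip.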
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
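Concretely: a bswap16 with TCG_BSWAP_OS produces a 16-bit byte-swapped
value that is then sign-extended, so every bit above bit 15 (up to the
width of the operation) is a copy of bit 15, whether or not bit 15 can
actually be set.  Recording that sign-repetition unconditionally is
therefore safe, and s_mask no longer depends on whether the swapped
sign bit may be 1.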
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
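Worked example (added as an illustration): for a 64-bit clz/ctz the
count is at most 64, so it fits in z_mask 64 | 63 = 0x7f and every bit
above bit 6 is zero, i.e. a repetition of the (zero) sign bit -- that is
the "maximum count" contribution.  When the first operand may be zero
the result is the op2 fallback instead, so z_mask is widened by
t2->z_mask and s_mask is narrowed to what t2 also guarantees.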
2
4
3
Signed-off-by: Jiajie Chen <c@jia.je>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230908022302.180442-13-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/loongarch64/tcg-target-con-set.h | 1 +
8
tcg/optimize.c | 15 ++++++++++-----
9
tcg/loongarch64/tcg-target.h | 2 +-
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
tcg/loongarch64/tcg-target.c.inc | 11 ++++++++++-
11
3 files changed, 12 insertions(+), 2 deletions(-)
12
10
13
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/loongarch64/tcg-target-con-set.h
13
--- a/tcg/optimize.c
16
+++ b/tcg/loongarch64/tcg-target-con-set.h
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, rZ)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
18
C_O1_I2(w, w, w)
16
19
C_O1_I2(w, w, wM)
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
20
C_O1_I2(w, w, wA)
21
+C_O1_I3(w, w, w, w)
22
C_O1_I4(r, rZ, rJ, rZ, rZ)
23
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/loongarch64/tcg-target.h
26
+++ b/tcg/loongarch64/tcg-target.h
27
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
28
#define TCG_TARGET_HAS_rotv_vec 0
29
#define TCG_TARGET_HAS_sat_vec 1
30
#define TCG_TARGET_HAS_minmax_vec 1
31
-#define TCG_TARGET_HAS_bitsel_vec 0
32
+#define TCG_TARGET_HAS_bitsel_vec 1
33
#define TCG_TARGET_HAS_cmpsel_vec 0
34
35
#define TCG_TARGET_DEFAULT_MO (0)
36
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
37
index XXXXXXX..XXXXXXX 100644
38
--- a/tcg/loongarch64/tcg-target.c.inc
39
+++ b/tcg/loongarch64/tcg-target.c.inc
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
41
const int const_args[TCG_MAX_OP_ARGS])
42
{
18
{
43
TCGType type = vecl + TCG_TYPE_V64;
19
- uint64_t z_mask;
44
- TCGArg a0, a1, a2;
20
+ uint64_t z_mask, s_mask;
45
+ TCGArg a0, a1, a2, a3;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
46
TCGReg temp = TCG_REG_TMP0;
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
47
TCGReg temp_vec = TCG_VEC_TMP0;
23
48
24
- if (arg_is_const(op->args[1])) {
49
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
25
- uint64_t t = arg_info(op->args[1])->val;
50
a0 = args[0];
26
+ if (ti_is_const(t1)) {
51
a1 = args[1];
27
+ uint64_t t = ti_const_val(t1);
52
a2 = args[2];
28
53
+ a3 = args[3];
29
if (t != 0) {
54
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
55
/* Currently only supports V128 */
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
56
tcg_debug_assert(type == TCG_TYPE_V128);
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
58
case INDEX_op_sarv_vec:
59
tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
60
break;
61
+ case INDEX_op_bitsel_vec:
62
+ /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
63
+ tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
64
+ break;
65
case INDEX_op_dupm_vec:
66
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
67
break;
68
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
69
case INDEX_op_shlv_vec:
70
case INDEX_op_shrv_vec:
71
case INDEX_op_sarv_vec:
72
+ case INDEX_op_bitsel_vec:
73
return 1;
74
default:
75
return 0;
76
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
77
case INDEX_op_neg_vec:
78
return C_O1_I1(w, w);
79
80
+ case INDEX_op_bitsel_vec:
81
+ return C_O1_I3(w, w, w, w);
82
+
83
default:
32
default:
84
g_assert_not_reached();
33
g_assert_not_reached();
85
}
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
86
--
45
--
87
2.34.1
46
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
2
3
3
LSX support on host cpu is detected via hwcap.
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
Lower the following ops to LSX:
6
7
- dup_vec
8
- dupi_vec
9
- dupm_vec
10
- ld_vec
11
- st_vec
12
13
Signed-off-by: Jiajie Chen <c@jia.je>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-Id: <20230908022302.180442-3-c@jia.je>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
---
6
---
18
tcg/loongarch64/tcg-target-con-set.h | 2 +
7
tcg/optimize.c | 13 ++++++++++---
19
tcg/loongarch64/tcg-target-con-str.h | 1 +
8
1 file changed, 10 insertions(+), 3 deletions(-)
20
tcg/loongarch64/tcg-target.h | 38 ++++-
21
tcg/loongarch64/tcg-target.opc.h | 12 ++
22
tcg/loongarch64/tcg-target.c.inc | 219 ++++++++++++++++++++++++++-
23
5 files changed, 270 insertions(+), 2 deletions(-)
24
create mode 100644 tcg/loongarch64/tcg-target.opc.h
25
9
26
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
27
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/loongarch64/tcg-target-con-set.h
12
--- a/tcg/optimize.c
29
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
+++ b/tcg/optimize.c
30
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
31
C_O0_I1(r)
15
return true;
32
C_O0_I2(rZ, r)
33
C_O0_I2(rZ, rZ)
34
+C_O0_I2(w, r)
35
C_O1_I1(r, r)
36
+C_O1_I1(w, r)
37
C_O1_I2(r, r, rC)
38
C_O1_I2(r, r, ri)
39
C_O1_I2(r, r, rI)
40
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/loongarch64/tcg-target-con-str.h
43
+++ b/tcg/loongarch64/tcg-target-con-str.h
44
@@ -XXX,XX +XXX,XX @@
45
* REGS(letter, register_mask)
46
*/
47
REGS('r', ALL_GENERAL_REGS)
48
+REGS('w', ALL_VECTOR_REGS)
49
50
/*
51
* Define constraint letters for constants:
52
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tcg/loongarch64/tcg-target.h
55
+++ b/tcg/loongarch64/tcg-target.h
56
@@ -XXX,XX +XXX,XX @@
57
#define LOONGARCH_TCG_TARGET_H
58
59
#define TCG_TARGET_INSN_UNIT_SIZE 4
60
-#define TCG_TARGET_NB_REGS 32
61
+#define TCG_TARGET_NB_REGS 64
62
63
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
64
65
@@ -XXX,XX +XXX,XX @@ typedef enum {
66
TCG_REG_S7,
67
TCG_REG_S8,
68
69
+ TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
70
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
71
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
72
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
73
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
74
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
75
+ TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
76
+ TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
77
+
78
/* aliases */
79
TCG_AREG0 = TCG_REG_S0,
80
TCG_REG_TMP0 = TCG_REG_T8,
81
TCG_REG_TMP1 = TCG_REG_T7,
82
TCG_REG_TMP2 = TCG_REG_T6,
83
+ TCG_VEC_TMP0 = TCG_REG_V23,
84
} TCGReg;
85
86
+extern bool use_lsx_instructions;
87
+
88
/* used for function call generation */
89
#define TCG_REG_CALL_STACK TCG_REG_SP
90
#define TCG_TARGET_STACK_ALIGN 16
91
@@ -XXX,XX +XXX,XX @@ typedef enum {
92
93
#define TCG_TARGET_HAS_qemu_ldst_i128 0
94
95
+#define TCG_TARGET_HAS_v64 0
96
+#define TCG_TARGET_HAS_v128 use_lsx_instructions
97
+#define TCG_TARGET_HAS_v256 0
98
+
99
+#define TCG_TARGET_HAS_not_vec 0
100
+#define TCG_TARGET_HAS_neg_vec 0
101
+#define TCG_TARGET_HAS_abs_vec 0
102
+#define TCG_TARGET_HAS_andc_vec 0
103
+#define TCG_TARGET_HAS_orc_vec 0
104
+#define TCG_TARGET_HAS_nand_vec 0
105
+#define TCG_TARGET_HAS_nor_vec 0
106
+#define TCG_TARGET_HAS_eqv_vec 0
107
+#define TCG_TARGET_HAS_mul_vec 0
108
+#define TCG_TARGET_HAS_shi_vec 0
109
+#define TCG_TARGET_HAS_shs_vec 0
110
+#define TCG_TARGET_HAS_shv_vec 0
111
+#define TCG_TARGET_HAS_roti_vec 0
112
+#define TCG_TARGET_HAS_rots_vec 0
113
+#define TCG_TARGET_HAS_rotv_vec 0
114
+#define TCG_TARGET_HAS_sat_vec 0
115
+#define TCG_TARGET_HAS_minmax_vec 0
116
+#define TCG_TARGET_HAS_bitsel_vec 0
117
+#define TCG_TARGET_HAS_cmpsel_vec 0
118
+
119
#define TCG_TARGET_DEFAULT_MO (0)
120
121
#define TCG_TARGET_NEED_LDST_LABELS
122
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target.opc.h
123
new file mode 100644
124
index XXXXXXX..XXXXXXX
125
--- /dev/null
126
+++ b/tcg/loongarch64/tcg-target.opc.h
127
@@ -XXX,XX +XXX,XX @@
128
+/*
129
+ * Copyright (c) 2023 Jiajie Chen
130
+ *
131
+ * This work is licensed under the terms of the GNU GPL, version 2 or
132
+ * (at your option) any later version.
133
+ *
134
+ * See the COPYING file in the top-level directory for details.
135
+ *
136
+ * Target-specific opcodes for host vector expansion. These will be
137
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
138
+ * consider these to be UNSPEC with names.
139
+ */
140
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/loongarch64/tcg-target.c.inc
143
+++ b/tcg/loongarch64/tcg-target.c.inc
144
@@ -XXX,XX +XXX,XX @@
145
#include "../tcg-ldst.c.inc"
146
#include <asm/hwcap.h>
147
148
+bool use_lsx_instructions;
149
+
150
#ifdef CONFIG_DEBUG_TCG
151
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
152
"zero",
153
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
154
"s5",
155
"s6",
156
"s7",
157
- "s8"
158
+ "s8",
159
+ "vr0",
160
+ "vr1",
161
+ "vr2",
162
+ "vr3",
163
+ "vr4",
164
+ "vr5",
165
+ "vr6",
166
+ "vr7",
167
+ "vr8",
168
+ "vr9",
169
+ "vr10",
170
+ "vr11",
171
+ "vr12",
172
+ "vr13",
173
+ "vr14",
174
+ "vr15",
175
+ "vr16",
176
+ "vr17",
177
+ "vr18",
178
+ "vr19",
179
+ "vr20",
180
+ "vr21",
181
+ "vr22",
182
+ "vr23",
183
+ "vr24",
184
+ "vr25",
185
+ "vr26",
186
+ "vr27",
187
+ "vr28",
188
+ "vr29",
189
+ "vr30",
190
+ "vr31",
191
};
192
#endif
193
194
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
195
TCG_REG_A2,
196
TCG_REG_A1,
197
TCG_REG_A0,
198
+
199
+ /* Vector registers */
200
+ TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
201
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
202
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
203
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
204
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
205
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
206
+ /* V24 - V31 are caller-saved, and skipped. */
207
};
208
209
static const int tcg_target_call_iarg_regs[] = {
210
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
211
#define TCG_CT_CONST_WSZ 0x2000
212
213
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
214
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
215
216
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
217
{
218
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
219
}
220
}
16
}
221
17
222
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
223
+ TCGReg rd, TCGReg rs)
224
+{
19
+{
225
+ switch (vece) {
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
226
+ case MO_8:
227
+ tcg_out_opc_vreplgr2vr_b(s, rd, rs);
228
+ break;
229
+ case MO_16:
230
+ tcg_out_opc_vreplgr2vr_h(s, rd, rs);
231
+ break;
232
+ case MO_32:
233
+ tcg_out_opc_vreplgr2vr_w(s, rd, rs);
234
+ break;
235
+ case MO_64:
236
+ tcg_out_opc_vreplgr2vr_d(s, rd, rs);
237
+ break;
238
+ default:
239
+ g_assert_not_reached();
240
+ }
241
+ return true;
242
+}
21
+}
243
+
22
+
244
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
245
+ TCGReg r, TCGReg base, intptr_t offset)
24
{
246
+{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
247
+ /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
248
+ if (offset < -0x800 || offset > 0x7ff || \
27
249
+ (offset & ((1 << vece) - 1)) != 0) {
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
250
+ tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
29
{
251
+ base = TCG_REG_TMP0;
30
+ uint64_t z_mask;
252
+ offset = 0;
253
+ }
254
+ offset >>= vece;
255
+
31
+
256
+ switch (vece) {
32
if (fold_const1(ctx, op)) {
257
+ case MO_8:
33
return true;
258
+ tcg_out_opc_vldrepl_b(s, r, base, offset);
34
}
259
+ break;
35
260
+ case MO_16:
36
switch (ctx->type) {
261
+ tcg_out_opc_vldrepl_h(s, r, base, offset);
37
case TCG_TYPE_I32:
262
+ break;
38
- ctx->z_mask = 32 | 31;
263
+ case MO_32:
39
+ z_mask = 32 | 31;
264
+ tcg_out_opc_vldrepl_w(s, r, base, offset);
40
break;
265
+ break;
41
case TCG_TYPE_I64:
266
+ case MO_64:
42
- ctx->z_mask = 64 | 63;
267
+ tcg_out_opc_vldrepl_d(s, r, base, offset);
43
+ z_mask = 64 | 63;
268
+ break;
44
break;
269
+ default:
270
+ g_assert_not_reached();
271
+ }
272
+ return true;
273
+}
274
+
275
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
276
+ TCGReg rd, int64_t v64)
277
+{
278
+ /* Try vldi if imm can fit */
279
+ int64_t value = sextract64(v64, 0, 8 << vece);
280
+ if (-0x200 <= value && value <= 0x1FF) {
281
+ uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
282
+ tcg_out_opc_vldi(s, rd, imm);
283
+ return;
284
+ }
285
+
286
+ /* TODO: vldi patterns when imm 12 is set */
287
+
288
+ /* Fallback to vreplgr2vr */
289
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
290
+ switch (vece) {
291
+ case MO_8:
292
+ tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
293
+ break;
294
+ case MO_16:
295
+ tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
296
+ break;
297
+ case MO_32:
298
+ tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
299
+ break;
300
+ case MO_64:
301
+ tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
302
+ break;
303
+ default:
304
+ g_assert_not_reached();
305
+ }
306
+}
307
+
308
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
309
+ unsigned vecl, unsigned vece,
310
+ const TCGArg args[TCG_MAX_OP_ARGS],
311
+ const int const_args[TCG_MAX_OP_ARGS])
312
+{
313
+ TCGType type = vecl + TCG_TYPE_V64;
314
+ TCGArg a0, a1, a2;
315
+ TCGReg temp = TCG_REG_TMP0;
316
+
317
+ a0 = args[0];
318
+ a1 = args[1];
319
+ a2 = args[2];
320
+
321
+ /* Currently only supports V128 */
322
+ tcg_debug_assert(type == TCG_TYPE_V128);
323
+
324
+ switch (opc) {
325
+ case INDEX_op_st_vec:
326
+ /* Try to fit vst imm */
327
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
328
+ tcg_out_opc_vst(s, a0, a1, a2);
329
+ } else {
330
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
331
+ tcg_out_opc_vstx(s, a0, a1, temp);
332
+ }
333
+ break;
334
+ case INDEX_op_ld_vec:
335
+ /* Try to fit vld imm */
336
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
337
+ tcg_out_opc_vld(s, a0, a1, a2);
338
+ } else {
339
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
340
+ tcg_out_opc_vldx(s, a0, a1, temp);
341
+ }
342
+ break;
343
+ case INDEX_op_dupm_vec:
344
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
345
+ break;
346
+ default:
347
+ g_assert_not_reached();
348
+ }
349
+}
350
+
351
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
352
+{
353
+ switch (opc) {
354
+ case INDEX_op_ld_vec:
355
+ case INDEX_op_st_vec:
356
+ case INDEX_op_dup_vec:
357
+ case INDEX_op_dupm_vec:
358
+ return 1;
359
+ default:
360
+ return 0;
361
+ }
362
+}
363
+
364
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
365
+ TCGArg a0, ...)
366
+{
367
+ g_assert_not_reached();
368
+}
369
+
370
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
371
{
372
switch (op) {
373
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
374
case INDEX_op_movcond_i64:
375
return C_O1_I4(r, rZ, rJ, rZ, rZ);
376
377
+ case INDEX_op_ld_vec:
378
+ case INDEX_op_dupm_vec:
379
+ case INDEX_op_dup_vec:
380
+ return C_O1_I1(w, r);
381
+
382
+ case INDEX_op_st_vec:
383
+ return C_O0_I2(w, r);
384
+
385
default:
45
default:
386
g_assert_not_reached();
46
g_assert_not_reached();
387
}
47
}
388
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
48
- return false;
389
exit(EXIT_FAILURE);
49
+ return fold_masks_z(ctx, op, z_mask);
390
}
391
392
+ if (hwcap & HWCAP_LOONGARCH_LSX) {
393
+ use_lsx_instructions = 1;
394
+ }
395
+
396
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
397
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
398
399
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
400
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
401
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
402
403
+ if (use_lsx_instructions) {
404
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
405
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
406
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
407
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
408
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
409
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
410
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
411
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
412
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
413
+ }
414
+
415
s->reserved_regs = 0;
416
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
417
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
418
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
419
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
420
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
421
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
422
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
423
}
50
}
424
51
425
typedef struct {
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
426
--
53
--
427
2.34.1
54
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
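For reference, the two rewrites rely on the identities

    deposit(0, ofs = 0, len, x)  ==  x & MAKE_64BIT_MASK(0, len)
    deposit(y, ofs, len, 0)      ==  y & deposit64(-1, ofs, len, 0)

and once the op has been rewritten as an AND, calling fold_and() reuses
the existing constant/mask handling for AND instead of duplicating it.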
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
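Worked example (added as an illustration): for a 64-bit deposit with
ofs 32 and len 32 the whole top half of the result is t2, so t2's
sign-repetition information simply moves up by ofs bits:
s_mask = t2->s_mask << ofs.  If the field stops short of the top, the
inserted bits may break any repetition at or below ofs + len, so only
t1's repetitions strictly above that point survive:
s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len).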
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
1
From: Jiajie Chen <c@jia.je>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
2
3
3
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230908022302.180442-5-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/loongarch64/tcg-target-con-set.h | 1 +
7
tcg/optimize.c | 29 ++++++++++++-----------------
9
tcg/loongarch64/tcg-target-con-str.h | 1 +
8
1 file changed, 12 insertions(+), 17 deletions(-)
10
tcg/loongarch64/tcg-target.c.inc | 65 ++++++++++++++++++++++++++++
11
3 files changed, 67 insertions(+)
12
9
13
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/loongarch64/tcg-target-con-set.h
12
--- a/tcg/optimize.c
16
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
18
C_O1_I2(r, rZ, ri)
15
19
C_O1_I2(r, rZ, rJ)
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
C_O1_I2(r, rZ, rZ)
17
{
21
+C_O1_I2(w, w, wM)
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
22
C_O1_I4(r, rZ, rJ, rZ, rZ)
19
+ uint64_t s_mask_old, s_mask, z_mask;
23
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
20
bool type_change = false;
24
index XXXXXXX..XXXXXXX 100644
21
+ TempOptInfo *t1;
25
--- a/tcg/loongarch64/tcg-target-con-str.h
22
26
+++ b/tcg/loongarch64/tcg-target-con-str.h
23
if (fold_const1(ctx, op)) {
27
@@ -XXX,XX +XXX,XX @@ CONST('U', TCG_CT_CONST_U12)
28
CONST('Z', TCG_CT_CONST_ZERO)
29
CONST('C', TCG_CT_CONST_C12)
30
CONST('W', TCG_CT_CONST_WSZ)
31
+CONST('M', TCG_CT_CONST_VCMP)
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/loongarch64/tcg-target.c.inc
35
+++ b/tcg/loongarch64/tcg-target.c.inc
36
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
37
#define TCG_CT_CONST_U12 0x800
38
#define TCG_CT_CONST_C12 0x1000
39
#define TCG_CT_CONST_WSZ 0x2000
40
+#define TCG_CT_CONST_VCMP 0x4000
41
42
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
43
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
44
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
45
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
46
return true;
24
return true;
47
}
25
}
48
+ int64_t vec_val = sextract64(val, 0, 8 << vece);
26
49
+ if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
27
- z_mask = arg_info(op->args[1])->z_mask;
50
+ return true;
28
- s_mask = arg_info(op->args[1])->s_mask;
51
+ }
29
+ t1 = arg_info(op->args[1]);
52
return false;
30
+ z_mask = t1->z_mask;
53
}
31
+ s_mask = t1->s_mask;
54
32
s_mask_old = s_mask;
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
33
56
TCGType type = vecl + TCG_TYPE_V64;
34
switch (op->opc) {
57
TCGArg a0, a1, a2;
35
CASE_OP_32_64(ext8s):
58
TCGReg temp = TCG_REG_TMP0;
36
- sign = INT8_MIN;
59
+ TCGReg temp_vec = TCG_VEC_TMP0;
37
- z_mask = (uint8_t)z_mask;
60
+
38
+ s_mask |= INT8_MIN;
61
+ static const LoongArchInsn cmp_vec_insn[16][4] = {
39
+ z_mask = (int8_t)z_mask;
62
+ [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
63
+ [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
64
+ [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
65
+ [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
66
+ [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
67
+ };
68
+ static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
69
+ [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
70
+ [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
71
+ [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
72
+ [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
73
+ [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
74
+ };
75
+ LoongArchInsn insn;
76
77
a0 = args[0];
78
a1 = args[1];
79
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
80
tcg_out_opc_vldx(s, a0, a1, temp);
81
}
82
break;
40
break;
83
+ case INDEX_op_cmp_vec:
41
CASE_OP_32_64(ext16s):
84
+ TCGCond cond = args[3];
42
- sign = INT16_MIN;
85
+ if (const_args[2]) {
43
- z_mask = (uint16_t)z_mask;
86
+ /*
44
+ s_mask |= INT16_MIN;
87
+ * cmp_vec dest, src, value
45
+ z_mask = (int16_t)z_mask;
88
+ * Try vseqi/vslei/vslti
89
+ */
90
+ int64_t value = sextract64(a2, 0, 8 << vece);
91
+ if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
92
+ cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
93
+ tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
94
+ a0, a1, value));
95
+ break;
96
+ } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
97
+ (0x00 <= value && value <= 0x1f)) {
98
+ tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
99
+ a0, a1, value));
100
+ break;
101
+ }
102
+
103
+ /*
104
+ * Fallback to:
105
+ * dupi_vec temp, a2
106
+ * cmp_vec a0, a1, temp, cond
107
+ */
108
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
109
+ a2 = temp_vec;
110
+ }
111
+
112
+ insn = cmp_vec_insn[cond][vece];
113
+ if (insn == 0) {
114
+ TCGArg t;
115
+ t = a1, a1 = a2, a2 = t;
116
+ cond = tcg_swap_cond(cond);
117
+ insn = cmp_vec_insn[cond][vece];
118
+ tcg_debug_assert(insn != 0);
119
+ }
120
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
121
+ break;
122
case INDEX_op_dupm_vec:
123
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
124
break;
46
break;
125
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
47
case INDEX_op_ext_i32_i64:
126
case INDEX_op_st_vec:
48
type_change = true;
127
case INDEX_op_dup_vec:
49
QEMU_FALLTHROUGH;
128
case INDEX_op_dupm_vec:
50
case INDEX_op_ext32s_i64:
129
+ case INDEX_op_cmp_vec:
51
- sign = INT32_MIN;
130
return 1;
52
- z_mask = (uint32_t)z_mask;
131
default:
53
+ s_mask |= INT32_MIN;
132
return 0;
54
+ z_mask = (int32_t)z_mask;
133
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
55
break;
134
case INDEX_op_st_vec:
135
return C_O0_I2(w, r);
136
137
+ case INDEX_op_cmp_vec:
138
+ return C_O1_I2(w, w, wM);
139
+
140
default:
56
default:
141
g_assert_not_reached();
57
g_assert_not_reached();
142
}
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
143
--
76
--
144
2.34.1
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
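The recurring shape of these conversions, in rough sketch form (fold_masks_z
and fold_masks_s are assumed here to be thin wrappers around fold_masks_zs,
as the fold_masks_s context visible in a later hunk suggests):

    /* before: stash the mask in the shared OptContext and rely on the
       generic finish step */
    ctx->z_mask = z_mask;
    return fold_masks(ctx, op);

    /* after: hand the mask straight to the finishing helper */
    return fold_masks_z(ctx, op, z_mask);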
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
1
From: Nicholas Piggin <npiggin@gmail.com>
1
Avoid the use of the OptContext slots.
2
2
3
mttcg asserts that an execution ending with EXCP_HALTED must have
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
cpu->halted. However, between the event or instruction that sets
5
cpu->halted and requests exit, and the assertion here, an
6
asynchronous event could clear cpu->halted.
7
8
This leads to crashes running AIX on ppc/pseries because it uses
9
H_CEDE/H_PROD hcalls, where H_CEDE sets self->halted = 1 and
10
H_PROD sets other cpu->halted = 0 and kicks it.
11
12
H_PROD could be turned into an interrupt to wake, but several other
13
places in ppc, sparc, and semihosting follow what looks like a similar
14
pattern setting halted = 0 directly. So remove this assertion.
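A sketch of the problematic interleaving (illustrative only, not actual code):

    /* vCPU A, H_CEDE:  cpu->halted = 1;  ...  exits with EXCP_HALTED      */
    /* vCPU B, H_PROD:  other_cpu->halted = 0;  kick(other_cpu);           */
    /* If B runs between A's exit and the g_assert(cpu->halted) in
       mttcg_cpu_thread_fn, the assertion fires even though nothing is
       actually wrong.                                                     */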
15
16
Reported-by: Ivan Warren <ivan@vmfacility.fr>
17
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
18
Message-Id: <20230829010658.8252-1-npiggin@gmail.com>
19
[rth: Keep the case label and adjust the comment.]
20
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
21
---
5
---
22
accel/tcg/tcg-accel-ops-mttcg.c | 9 ++-------
6
tcg/optimize.c | 9 ++-------
23
1 file changed, 2 insertions(+), 7 deletions(-)
7
1 file changed, 2 insertions(+), 7 deletions(-)
24
8
25
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
26
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
27
--- a/accel/tcg/tcg-accel-ops-mttcg.c
11
--- a/tcg/optimize.c
28
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
12
+++ b/tcg/optimize.c
29
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
30
break;
14
{
31
case EXCP_HALTED:
15
/* Set to 1 all bits to the left of the rightmost. */
32
/*
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
33
- * during start-up the vCPU is reset and the thread is
17
- ctx->z_mask = -(z_mask & -z_mask);
34
- * kicked several times. If we don't ensure we go back
18
+ z_mask = -(z_mask & -z_mask);
35
- * to sleep in the halted state we won't cleanly
19
36
- * start-up when the vCPU is enabled.
20
- /*
37
- *
21
- * Because of fold_sub_to_neg, we want to always return true,
38
- * cpu->halted should ensure we sleep in wait_io_event
22
- * via finish_folding.
39
+ * Usually cpu->halted is set, but may have already been
23
- */
40
+ * reset by another thread by the time we arrive here.
24
- finish_folding(ctx, op);
41
*/
25
- return true;
42
- g_assert(cpu->halted);
26
+ return fold_masks_z(ctx, op, z_mask);
43
break;
27
}
44
case EXCP_ATOMIC:
28
45
qemu_mutex_unlock_iothread();
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
46
--
30
--
47
2.34.1
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
1
Avoid the use of the OptContext slots.
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
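A worked example of the s_mask adjustment (illustrative values): for an
8-bit sign-extending load, width == 8, so

    MAKE_64BIT_MASK(width, 64 - width)            /* old: 0xffffffffffffff00 */
    MAKE_64BIT_MASK(width - 1, 64 - (width - 1))  /* new: 0xffffffffffffff80 */

i.e. the new mask also covers bit 7, matching the revised s_mask convention
in which the sign bit itself counts as one of the repetitions (compare
fold_tcg_ld, which now uses s_mask = INT8_MIN for an 8-bit signed load).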
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Tested-by: Song Gao <gaosong@loongson.cn>
3
Reviewed-by: Song Gao <gaosong@loongson.cn>
4
Message-Id: <20230831030904.1194667-2-richard.henderson@linaro.org>
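For context, a hypothetical front-end use of the new interface (dofs, aofs,
oprsz and maxsz are the usual gvec offsets and sizes; the constant 5 is
arbitrary):

    /* Set each byte of the destination to -1 where the corresponding
       source byte equals 5, else 0; expanded inline when the backend
       implements cmp_vec, otherwise via the new out-of-line helpers. */
    tcg_gen_gvec_cmpi(TCG_COND_EQ, MO_8, dofs, aofs, 5, oprsz, maxsz);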
5
---
9
---
6
accel/tcg/tcg-runtime.h | 25 ++++++
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
7
include/tcg/tcg-op-gvec-common.h | 6 ++
11
1 file changed, 21 insertions(+), 5 deletions(-)
8
accel/tcg/tcg-runtime-gvec.c | 26 ++++++
9
tcg/tcg-op-gvec.c | 149 +++++++++++++++++++++++++++++++
10
4 files changed, 206 insertions(+)
11
12
12
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/tcg-runtime.h
15
--- a/tcg/optimize.c
15
+++ b/accel/tcg/tcg-runtime.h
16
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
17
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
18
return fold_masks_s(ctx, op, s_mask);
18
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
19
}
19
20
20
+DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
21
+DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
22
+DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
23
{
23
+DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
24
+
43
+
25
+DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
26
+DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
27
+DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
28
+DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
29
+
30
+DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
31
+DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
32
+DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
33
+DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
34
+
35
+DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
36
+DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
37
+DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
38
+DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
39
+
40
+DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
41
+DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
42
+DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
43
+DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
44
+
45
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
46
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/include/tcg/tcg-op-gvec-common.h
49
+++ b/include/tcg/tcg-op-gvec-common.h
50
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
51
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
52
uint32_t aofs, uint32_t bofs,
53
uint32_t oprsz, uint32_t maxsz);
54
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
55
+ uint32_t aofs, int64_t c,
56
+ uint32_t oprsz, uint32_t maxsz);
57
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
58
+ uint32_t aofs, TCGv_i64 c,
59
+ uint32_t oprsz, uint32_t maxsz);
60
61
/*
62
* Perform vector bit select: d = (b & a) | (c & ~a).
63
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/accel/tcg/tcg-runtime-gvec.c
66
+++ b/accel/tcg/tcg-runtime-gvec.c
67
@@ -XXX,XX +XXX,XX @@ DO_CMP2(64)
68
#undef DO_CMP1
69
#undef DO_CMP2
70
71
+#define DO_CMP1(NAME, TYPE, OP) \
72
+void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
73
+{ \
74
+ intptr_t oprsz = simd_oprsz(desc); \
75
+ TYPE inv = simd_data(desc), b = b64; \
76
+ for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
77
+ *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
78
+ } \
79
+ clear_high(d, oprsz, desc); \
80
+}
45
+}
81
+
46
+
82
+#define DO_CMP2(SZ) \
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
83
+ DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
48
+{
84
+ DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
85
+ DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
50
+ ctx->prev_mb = NULL;
86
+ DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
51
+ return finish_folding(ctx, op);
87
+ DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
88
+
89
+DO_CMP2(8)
90
+DO_CMP2(16)
91
+DO_CMP2(32)
92
+DO_CMP2(64)
93
+
94
+#undef DO_CMP1
95
+#undef DO_CMP2
96
+
97
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
98
{
99
intptr_t oprsz = simd_oprsz(desc);
100
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/tcg/tcg-op-gvec.c
103
+++ b/tcg/tcg-op-gvec.c
104
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
105
}
106
}
52
}
107
53
108
+static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
109
+ uint32_t oprsz, uint32_t tysz, TCGType type,
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
110
+ TCGCond cond, TCGv_vec c)
56
break;
111
+{
57
case INDEX_op_qemu_ld_a32_i32:
112
+ TCGv_vec t0 = tcg_temp_new_vec(type);
58
case INDEX_op_qemu_ld_a64_i32:
113
+ TCGv_vec t1 = tcg_temp_new_vec(type);
59
+ done = fold_qemu_ld_1reg(&ctx, op);
114
+ uint32_t i;
115
+
116
+ for (i = 0; i < oprsz; i += tysz) {
117
+ tcg_gen_ld_vec(t1, cpu_env, aofs + i);
118
+ tcg_gen_cmp_vec(cond, vece, t0, t1, c);
119
+ tcg_gen_st_vec(t0, cpu_env, dofs + i);
120
+ }
121
+}
122
+
123
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
124
+ uint32_t aofs, TCGv_i64 c,
125
+ uint32_t oprsz, uint32_t maxsz)
126
+{
127
+ static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
128
+ static gen_helper_gvec_2i * const eq_fn[4] = {
129
+ gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
130
+ gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
131
+ };
132
+ static gen_helper_gvec_2i * const lt_fn[4] = {
133
+ gen_helper_gvec_lts8, gen_helper_gvec_lts16,
134
+ gen_helper_gvec_lts32, gen_helper_gvec_lts64
135
+ };
136
+ static gen_helper_gvec_2i * const le_fn[4] = {
137
+ gen_helper_gvec_les8, gen_helper_gvec_les16,
138
+ gen_helper_gvec_les32, gen_helper_gvec_les64
139
+ };
140
+ static gen_helper_gvec_2i * const ltu_fn[4] = {
141
+ gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
142
+ gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
143
+ };
144
+ static gen_helper_gvec_2i * const leu_fn[4] = {
145
+ gen_helper_gvec_leus8, gen_helper_gvec_leus16,
146
+ gen_helper_gvec_leus32, gen_helper_gvec_leus64
147
+ };
148
+ static gen_helper_gvec_2i * const * const fns[16] = {
149
+ [TCG_COND_EQ] = eq_fn,
150
+ [TCG_COND_LT] = lt_fn,
151
+ [TCG_COND_LE] = le_fn,
152
+ [TCG_COND_LTU] = ltu_fn,
153
+ [TCG_COND_LEU] = leu_fn,
154
+ };
155
+
156
+ TCGType type;
157
+
158
+ check_size_align(oprsz, maxsz, dofs | aofs);
159
+ check_overlap_2(dofs, aofs, maxsz);
160
+
161
+ if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
162
+ do_dup(MO_8, dofs, oprsz, maxsz,
163
+ NULL, NULL, -(cond == TCG_COND_ALWAYS));
164
+ return;
165
+ }
166
+
167
+ /*
168
+ * Implement inline with a vector type, if possible.
169
+ * Prefer integer when 64-bit host and 64-bit comparison.
170
+ */
171
+ type = choose_vector_type(cmp_list, vece, oprsz,
172
+ TCG_TARGET_REG_BITS == 64 && vece == MO_64);
173
+ if (type != 0) {
174
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
175
+ TCGv_vec t_vec = tcg_temp_new_vec(type);
176
+ uint32_t some;
177
+
178
+ tcg_gen_dup_i64_vec(vece, t_vec, c);
179
+ switch (type) {
180
+ case TCG_TYPE_V256:
181
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
182
+ expand_cmps_vec(vece, dofs, aofs, some, 32,
183
+ TCG_TYPE_V256, cond, t_vec);
184
+ aofs += some;
185
+ dofs += some;
186
+ oprsz -= some;
187
+ maxsz -= some;
188
+ /* fallthru */
189
+
190
+ case TCG_TYPE_V128:
191
+ some = QEMU_ALIGN_DOWN(oprsz, 16);
192
+ expand_cmps_vec(vece, dofs, aofs, some, 16,
193
+ TCG_TYPE_V128, cond, t_vec);
194
+ break;
60
+ break;
195
+
61
case INDEX_op_qemu_ld_a32_i64:
196
+ case TCG_TYPE_V64:
62
case INDEX_op_qemu_ld_a64_i64:
197
+ some = QEMU_ALIGN_DOWN(oprsz, 8);
63
+ if (TCG_TARGET_REG_BITS == 64) {
198
+ expand_cmps_vec(vece, dofs, aofs, some, 8,
64
+ done = fold_qemu_ld_1reg(&ctx, op);
199
+ TCG_TYPE_V64, cond, t_vec);
65
+ break;
200
+ break;
66
+ }
201
+
67
+ QEMU_FALLTHROUGH;
202
+ default:
68
case INDEX_op_qemu_ld_a32_i128:
203
+ g_assert_not_reached();
69
case INDEX_op_qemu_ld_a64_i128:
204
+ }
70
- done = fold_qemu_ld(&ctx, op);
205
+ tcg_temp_free_vec(t_vec);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
206
+ tcg_swap_vecop_list(hold_list);
72
break;
207
+ } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
73
case INDEX_op_qemu_st8_a32_i32:
208
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
74
case INDEX_op_qemu_st8_a64_i32:
209
+ uint32_t i;
210
+
211
+ for (i = 0; i < oprsz; i += 8) {
212
+ tcg_gen_ld_i64(t0, cpu_env, aofs + i);
213
+ tcg_gen_negsetcond_i64(cond, t0, t0, c);
214
+ tcg_gen_st_i64(t0, cpu_env, dofs + i);
215
+ }
216
+ tcg_temp_free_i64(t0);
217
+ } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
218
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
219
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
220
+ uint32_t i;
221
+
222
+ tcg_gen_extrl_i64_i32(t1, c);
223
+ for (i = 0; i < oprsz; i += 8) {
224
+ tcg_gen_ld_i32(t0, cpu_env, aofs + i);
225
+ tcg_gen_negsetcond_i32(cond, t0, t0, t1);
226
+ tcg_gen_st_i32(t0, cpu_env, dofs + i);
227
+ }
228
+ tcg_temp_free_i32(t0);
229
+ tcg_temp_free_i32(t1);
230
+ } else {
231
+ gen_helper_gvec_2i * const *fn = fns[cond];
232
+ bool inv = false;
233
+
234
+ if (fn == NULL) {
235
+ cond = tcg_invert_cond(cond);
236
+ fn = fns[cond];
237
+ assert(fn != NULL);
238
+ inv = true;
239
+ }
240
+ tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
241
+ return;
242
+ }
243
+
244
+ if (oprsz < maxsz) {
245
+ expand_clr(dofs + oprsz, maxsz - oprsz);
246
+ }
247
+}
248
+
249
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
250
+ uint32_t aofs, int64_t c,
251
+ uint32_t oprsz, uint32_t maxsz)
252
+{
253
+ TCGv_i64 tmp = tcg_constant_i64(c);
254
+ tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
255
+}
256
+
257
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
258
{
259
TCGv_i64 t = tcg_temp_ebb_new_i64();
260
--
75
--
261
2.34.1
76
2.43.0
New patch
1
Stores have no output operands, and so need no further work.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 11 +++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
20
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
28
}
29
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
--
60
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
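The intended calling convention, mirroring the fold_setcond and
fold_negsetcond hunks below (sketch only):

    i = fold_setcond_zmask(ctx, op, neg);
    if (i > 0) {
        return true;        /* folded completely, op already finished */
    }
    if (i == 0) {
        /* unchanged: the tst_pow2 transformation may still apply */
        fold_setcond_tst_pow2(ctx, op, neg);
    }
    /* i < 0: simplified in place (e.g. rewritten to a cheaper opcode),
       so skip tst_pow2 but still fall through to the mask bookkeeping */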
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
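A concrete (assumed) value may help: with the left-aligned s_mask
representation, s_mask = 0xffffffffffffff80 gives -s_mask = 0x80, a single
bit at the sign position, so

    sign = -s_mask;                    /* 0x80                              */
    if (sign && !(z_mask & sign))      /* old: sign bit known zero          */
    if (~z_mask & -s_mask)             /* new: same test in one expression;
                                          evaluates to 0 when -s_mask is 0  */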
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
7
---
3
accel/tcg/cputlb.c | 6 ++++--
8
tcg/optimize.c | 5 ++---
4
1 file changed, 4 insertions(+), 2 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
5
10
6
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
8
--- a/accel/tcg/cputlb.c
13
--- a/tcg/optimize.c
9
+++ b/accel/tcg/cputlb.c
14
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
11
MMUAccessType type, uintptr_t ra)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
12
{
18
{
13
if (unlikely(p->flags & TLB_MMIO)) {
19
- uint64_t s_mask, z_mask, sign;
14
- return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
20
+ uint64_t s_mask, z_mask;
15
+ QEMU_IOTHREAD_LOCK_GUARD();
21
TempOptInfo *t1, *t2;
16
+ return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
22
17
} else {
23
if (fold_const2(ctx, op) ||
18
return *(uint8_t *)p->haddr;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
19
}
25
* If the sign bit is known zero, then logical right shift
20
@@ -XXX,XX +XXX,XX @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
26
* will not reduce the number of input sign repetitions.
21
int mmu_idx, uintptr_t ra)
27
*/
22
{
28
- sign = -s_mask;
23
if (unlikely(p->flags & TLB_MMIO)) {
29
- if (sign && !(z_mask & sign)) {
24
- io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
30
+ if (~z_mask & -s_mask) {
25
+ QEMU_IOTHREAD_LOCK_GUARD();
31
return fold_masks_s(ctx, op, s_mask);
26
+ do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
32
}
27
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
33
break;
28
/* nothing */
29
} else {
30
--
34
--
31
2.34.1
35
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
2
3
3
Lower the following ops:
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
- add_vec
6
- sub_vec
7
8
Signed-off-by: Jiajie Chen <c@jia.je>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20230908022302.180442-6-c@jia.je>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
tcg/loongarch64/tcg-target-con-set.h | 1 +
7
tcg/optimize.c | 9 ++++++---
14
tcg/loongarch64/tcg-target-con-str.h | 1 +
8
1 file changed, 6 insertions(+), 3 deletions(-)
15
tcg/loongarch64/tcg-target.c.inc | 61 ++++++++++++++++++++++++++++
16
3 files changed, 63 insertions(+)
17
9
18
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
19
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/loongarch64/tcg-target-con-set.h
12
--- a/tcg/optimize.c
21
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
+++ b/tcg/optimize.c
22
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, ri)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
23
C_O1_I2(r, rZ, rJ)
15
fold_sub_to_neg(ctx, op)) {
24
C_O1_I2(r, rZ, rZ)
25
C_O1_I2(w, w, wM)
26
+C_O1_I2(w, w, wA)
27
C_O1_I4(r, rZ, rJ, rZ, rZ)
28
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/tcg/loongarch64/tcg-target-con-str.h
31
+++ b/tcg/loongarch64/tcg-target-con-str.h
32
@@ -XXX,XX +XXX,XX @@ CONST('Z', TCG_CT_CONST_ZERO)
33
CONST('C', TCG_CT_CONST_C12)
34
CONST('W', TCG_CT_CONST_WSZ)
35
CONST('M', TCG_CT_CONST_VCMP)
36
+CONST('A', TCG_CT_CONST_VADD)
37
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
38
index XXXXXXX..XXXXXXX 100644
39
--- a/tcg/loongarch64/tcg-target.c.inc
40
+++ b/tcg/loongarch64/tcg-target.c.inc
41
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
42
#define TCG_CT_CONST_C12 0x1000
43
#define TCG_CT_CONST_WSZ 0x2000
44
#define TCG_CT_CONST_VCMP 0x4000
45
+#define TCG_CT_CONST_VADD 0x8000
46
47
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
48
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
49
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
50
if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
51
return true;
16
return true;
52
}
17
}
53
+ if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
18
- return false;
54
+ return true;
19
+ return finish_folding(ctx, op);
55
+ }
56
return false;
57
}
20
}
58
21
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
60
}
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
61
}
38
}
62
39
63
+static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
64
+ const TCGArg a1, const TCGArg a2,
65
+ bool a2_is_const, bool is_add)
66
+{
67
+ static const LoongArchInsn add_vec_insn[4] = {
68
+ OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
69
+ };
70
+ static const LoongArchInsn add_vec_imm_insn[4] = {
71
+ OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
72
+ };
73
+ static const LoongArchInsn sub_vec_insn[4] = {
74
+ OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
75
+ };
76
+ static const LoongArchInsn sub_vec_imm_insn[4] = {
77
+ OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
78
+ };
79
+
80
+ if (a2_is_const) {
81
+ int64_t value = sextract64(a2, 0, 8 << vece);
82
+ if (!is_add) {
83
+ value = -value;
84
+ }
85
+
86
+ /* Try vaddi/vsubi */
87
+ if (0 <= value && value <= 0x1f) {
88
+ tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
89
+ a1, value));
90
+ return;
91
+ } else if (-0x1f <= value && value < 0) {
92
+ tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
93
+ a1, -value));
94
+ return;
95
+ }
96
+
97
+ /* constraint TCG_CT_CONST_VADD ensures unreachable */
98
+ g_assert_not_reached();
99
+ }
100
+
101
+ if (is_add) {
102
+ tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
103
+ } else {
104
+ tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
105
+ }
106
+}
107
+
108
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
109
unsigned vecl, unsigned vece,
110
const TCGArg args[TCG_MAX_OP_ARGS],
111
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
112
}
113
tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
114
break;
115
+ case INDEX_op_add_vec:
116
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
117
+ break;
118
+ case INDEX_op_sub_vec:
119
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
120
+ break;
121
case INDEX_op_dupm_vec:
122
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
123
break;
124
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
125
case INDEX_op_dup_vec:
126
case INDEX_op_dupm_vec:
127
case INDEX_op_cmp_vec:
128
+ case INDEX_op_add_vec:
129
+ case INDEX_op_sub_vec:
130
return 1;
131
default:
132
return 0;
133
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
134
case INDEX_op_cmp_vec:
135
return C_O1_I2(w, w, wM);
136
137
+ case INDEX_op_add_vec:
138
+ case INDEX_op_sub_vec:
139
+ return C_O1_I2(w, w, wA);
140
+
141
default:
142
g_assert_not_reached();
143
}
144
--
41
--
145
2.34.1
42
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
Avoid the use of the OptContext slots.
2
2
3
Lower the following ops:
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
- and_vec
6
- andc_vec
7
- or_vec
8
- orc_vec
9
- xor_vec
10
- nor_vec
11
- not_vec
12
13
Signed-off-by: Jiajie Chen <c@jia.je>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-Id: <20230908022302.180442-7-c@jia.je>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
---
5
---
18
tcg/loongarch64/tcg-target-con-set.h | 2 ++
6
tcg/optimize.c | 16 +++++++++-------
19
tcg/loongarch64/tcg-target.h | 8 ++---
7
1 file changed, 9 insertions(+), 7 deletions(-)
20
tcg/loongarch64/tcg-target.c.inc | 44 ++++++++++++++++++++++++++++
21
3 files changed, 50 insertions(+), 4 deletions(-)
22
8
23
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
24
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/loongarch64/tcg-target-con-set.h
11
--- a/tcg/optimize.c
26
+++ b/tcg/loongarch64/tcg-target-con-set.h
12
+++ b/tcg/optimize.c
27
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rZ, rZ)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
28
C_O0_I2(w, r)
14
29
C_O1_I1(r, r)
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
30
C_O1_I1(w, r)
16
{
31
+C_O1_I1(w, w)
17
+ uint64_t z_mask = -1, s_mask = 0;
32
C_O1_I2(r, r, rC)
18
+
33
C_O1_I2(r, r, ri)
19
/* We can't do any folding with a load, but we can record bits. */
34
C_O1_I2(r, r, rI)
20
switch (op->opc) {
35
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
21
CASE_OP_32_64(ld8s):
36
C_O1_I2(r, rZ, ri)
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
37
C_O1_I2(r, rZ, rJ)
23
+ s_mask = INT8_MIN;
38
C_O1_I2(r, rZ, rZ)
39
+C_O1_I2(w, w, w)
40
C_O1_I2(w, w, wM)
41
C_O1_I2(w, w, wA)
42
C_O1_I4(r, rZ, rJ, rZ, rZ)
43
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/loongarch64/tcg-target.h
46
+++ b/tcg/loongarch64/tcg-target.h
47
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
48
#define TCG_TARGET_HAS_v128 use_lsx_instructions
49
#define TCG_TARGET_HAS_v256 0
50
51
-#define TCG_TARGET_HAS_not_vec 0
52
+#define TCG_TARGET_HAS_not_vec 1
53
#define TCG_TARGET_HAS_neg_vec 0
54
#define TCG_TARGET_HAS_abs_vec 0
55
-#define TCG_TARGET_HAS_andc_vec 0
56
-#define TCG_TARGET_HAS_orc_vec 0
57
+#define TCG_TARGET_HAS_andc_vec 1
58
+#define TCG_TARGET_HAS_orc_vec 1
59
#define TCG_TARGET_HAS_nand_vec 0
60
-#define TCG_TARGET_HAS_nor_vec 0
61
+#define TCG_TARGET_HAS_nor_vec 1
62
#define TCG_TARGET_HAS_eqv_vec 0
63
#define TCG_TARGET_HAS_mul_vec 0
64
#define TCG_TARGET_HAS_shi_vec 0
65
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
66
index XXXXXXX..XXXXXXX 100644
67
--- a/tcg/loongarch64/tcg-target.c.inc
68
+++ b/tcg/loongarch64/tcg-target.c.inc
69
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
70
tcg_out_opc_vldx(s, a0, a1, temp);
71
}
72
break;
24
break;
73
+ case INDEX_op_and_vec:
25
CASE_OP_32_64(ld8u):
74
+ tcg_out_opc_vand_v(s, a0, a1, a2);
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
75
+ break;
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
76
+ case INDEX_op_andc_vec:
28
break;
77
+ /*
29
CASE_OP_32_64(ld16s):
78
+ * vandn vd, vj, vk: vd = vk & ~vj
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
79
+ * andc_vec vd, vj, vk: vd = vj & ~vk
31
+ s_mask = INT16_MIN;
80
+ * vk and vk are swapped
32
break;
81
+ */
33
CASE_OP_32_64(ld16u):
82
+ tcg_out_opc_vandn_v(s, a0, a2, a1);
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
83
+ break;
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
84
+ case INDEX_op_or_vec:
36
break;
85
+ tcg_out_opc_vor_v(s, a0, a1, a2);
37
case INDEX_op_ld32s_i64:
86
+ break;
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
87
+ case INDEX_op_orc_vec:
39
+ s_mask = INT32_MIN;
88
+ tcg_out_opc_vorn_v(s, a0, a1, a2);
40
break;
89
+ break;
41
case INDEX_op_ld32u_i64:
90
+ case INDEX_op_xor_vec:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
91
+ tcg_out_opc_vxor_v(s, a0, a1, a2);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
92
+ break;
44
break;
93
+ case INDEX_op_nor_vec:
94
+ tcg_out_opc_vnor_v(s, a0, a1, a2);
95
+ break;
96
+ case INDEX_op_not_vec:
97
+ tcg_out_opc_vnor_v(s, a0, a1, a1);
98
+ break;
99
case INDEX_op_cmp_vec:
100
TCGCond cond = args[3];
101
if (const_args[2]) {
102
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
103
case INDEX_op_cmp_vec:
104
case INDEX_op_add_vec:
105
case INDEX_op_sub_vec:
106
+ case INDEX_op_and_vec:
107
+ case INDEX_op_andc_vec:
108
+ case INDEX_op_or_vec:
109
+ case INDEX_op_orc_vec:
110
+ case INDEX_op_xor_vec:
111
+ case INDEX_op_nor_vec:
112
+ case INDEX_op_not_vec:
113
return 1;
114
default:
115
return 0;
116
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
117
case INDEX_op_sub_vec:
118
return C_O1_I2(w, w, wA);
119
120
+ case INDEX_op_and_vec:
121
+ case INDEX_op_andc_vec:
122
+ case INDEX_op_or_vec:
123
+ case INDEX_op_orc_vec:
124
+ case INDEX_op_xor_vec:
125
+ case INDEX_op_nor_vec:
126
+ return C_O1_I2(w, w, w);
127
+
128
+ case INDEX_op_not_vec:
129
+ return C_O1_I1(w, w);
130
+
131
default:
45
default:
132
g_assert_not_reached();
46
g_assert_not_reached();
133
}
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
134
--
53
--
135
2.34.1
54
2.43.0
1
From: Akihiko Odaki <akihiko.odaki@daynix.com>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
IA-64 and PA-RISC host support is already removed with commit
4
b1cef6d02f ("Drop remaining bits of ia64 host support").
5
6
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
7
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
3
---
10
util/oslib-posix.c | 15 +++------------
4
tcg/optimize.c | 2 +-
11
1 file changed, 3 insertions(+), 12 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
12
6
13
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
15
--- a/util/oslib-posix.c
9
--- a/tcg/optimize.c
16
+++ b/util/oslib-posix.c
10
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ char *qemu_get_pid_name(pid_t pid)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
18
12
TCGType type;
19
void *qemu_alloc_stack(size_t *sz)
13
20
{
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
21
- void *ptr, *guardpage;
15
- return false;
22
+ void *ptr;
16
+ return finish_folding(ctx, op);
23
int flags;
24
#ifdef CONFIG_DEBUG_STACK_USAGE
25
void *ptr2;
26
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
27
abort();
28
}
17
}
29
18
30
-#if defined(HOST_IA64)
19
type = ctx->type;
31
- /* separate register stack */
32
- guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
33
-#elif defined(HOST_HPPA)
34
- /* stack grows up */
35
- guardpage = ptr + *sz - pagesz;
36
-#else
37
- /* stack grows down */
38
- guardpage = ptr;
39
-#endif
40
- if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
41
+ /* Stack grows down -- guard page at the bottom. */
42
+ if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
43
perror("failed to set up stack guard page");
44
abort();
45
}
46
--
20
--
47
2.34.1
21
2.43.0
1
Now that we defer address space update and tlb_flush until
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
the next async_run_on_cpu, the plugin run at the end of the
2
Remove fold_masks as the function becomes unused.
3
instruction no longer has to contend with a flushed tlb.
4
Therefore, delete SavedIOTLB entirely.
5
3
6
Properly return false from tlb_plugin_lookup when we do
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
not have a tlb match.
8
9
Fixes a bug in which SavedIOTLB had stale data, because
10
there were multiple i/o accesses within a single insn.
11
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
include/hw/core/cpu.h | 13 -------
7
tcg/optimize.c | 18 ++++++++----------
15
include/qemu/typedefs.h | 1 -
8
1 file changed, 8 insertions(+), 10 deletions(-)
16
accel/tcg/cputlb.c | 79 ++++++++++++-----------------------------
17
3 files changed, 23 insertions(+), 70 deletions(-)
18
9
19
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/core/cpu.h
12
--- a/tcg/optimize.c
22
+++ b/include/hw/core/cpu.h
13
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
24
QTAILQ_ENTRY(CPUWatchpoint) entry;
15
return fold_masks_zs(ctx, op, -1, s_mask);
25
};
26
27
-#ifdef CONFIG_PLUGIN
28
-/*
29
- * For plugins we sometime need to save the resolved iotlb data before
30
- * the memory regions get moved around by io_writex.
31
- */
32
-typedef struct SavedIOTLB {
33
- MemoryRegionSection *section;
34
- hwaddr mr_offset;
35
-} SavedIOTLB;
36
-#endif
37
-
38
struct KVMState;
39
struct kvm_run;
40
41
@@ -XXX,XX +XXX,XX @@ struct CPUState {
42
43
#ifdef CONFIG_PLUGIN
44
GArray *plugin_mem_cbs;
45
- /* saved iotlb data from io_writex */
46
- SavedIOTLB saved_iotlb;
47
#endif
48
49
/* TODO Move common fields from CPUArchState here. */
50
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
51
index XXXXXXX..XXXXXXX 100644
52
--- a/include/qemu/typedefs.h
53
+++ b/include/qemu/typedefs.h
54
@@ -XXX,XX +XXX,XX @@ typedef struct QString QString;
55
typedef struct RAMBlock RAMBlock;
56
typedef struct Range Range;
57
typedef struct ReservedRegion ReservedRegion;
58
-typedef struct SavedIOTLB SavedIOTLB;
59
typedef struct SHPCDevice SHPCDevice;
60
typedef struct SSIBus SSIBus;
61
typedef struct TCGHelperInfo TCGHelperInfo;
62
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/accel/tcg/cputlb.c
65
+++ b/accel/tcg/cputlb.c
66
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
67
}
68
}
16
}
69
17
70
-/*
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
71
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
72
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
73
- * because of the side effect of io_writex changing memory layout.
74
- */
75
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
76
- hwaddr mr_offset)
77
-{
19
-{
78
-#ifdef CONFIG_PLUGIN
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
79
- SavedIOTLB *saved = &cs->saved_iotlb;
80
- saved->section = section;
81
- saved->mr_offset = mr_offset;
82
-#endif
83
-}
21
-}
84
-
22
-
85
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
23
/*
86
int mmu_idx, vaddr addr, uintptr_t retaddr,
24
* An "affected" mask bit is 0 if and only if the result is identical
87
MMUAccessType access_type, MemOp op)
25
* to the first input. Thus if the entire mask is 0, the operation
88
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
89
cpu_io_recompile(cpu, retaddr);
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
90
}
38
}
91
39
92
- /*
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
93
- * The memory_region_dispatch may trigger a flush/resize
41
- | arg_info(op->args[2])->z_mask;
94
- * so for plugins we save the iotlb_data just in case.
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
95
- */
43
- & arg_info(op->args[2])->s_mask;
96
- save_iotlb_data(cpu, section, mr_offset);
44
- return fold_masks(ctx, op);
97
-
45
+ t1 = arg_info(op->args[1]);
98
{
46
+ t2 = arg_info(op->args[2]);
99
QEMU_IOTHREAD_LOCK_GUARD();
47
+ z_mask = t1->z_mask | t2->z_mask;
100
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
48
+ s_mask = t1->s_mask & t2->s_mask;
101
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
102
}
50
}
103
cpu->mem_io_pc = retaddr;
51
104
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
105
- /*
106
- * The memory_region_dispatch may trigger a flush/resize
107
- * so for plugins we save the iotlb_data just in case.
108
- */
109
- save_iotlb_data(cpu, section, mr_offset);
110
-
111
{
112
QEMU_IOTHREAD_LOCK_GUARD();
113
r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
114
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
115
* in the softmmu lookup code (or helper). We don't handle re-fills or
116
* checking the victim table. This is purely informational.
117
*
118
- * This almost never fails as the memory access being instrumented
119
- * should have just filled the TLB. The one corner case is io_writex
120
- * which can cause TLB flushes and potential resizing of the TLBs
121
- * losing the information we need. In those cases we need to recover
122
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
123
- * from the same thread (which a mem callback will be) this is safe.
124
+ * The one corner case is i/o write, which can cause changes to the
125
+ * address space. Those changes, and the corresponding tlb flush,
126
+ * should be delayed until the next TB, so even then this ought not fail.
127
+ * But check, Just in Case.
128
*/
129
-
130
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
131
bool is_store, struct qemu_plugin_hwaddr *data)
132
{
133
CPUArchState *env = cpu->env_ptr;
134
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
135
uintptr_t index = tlb_index(env, mmu_idx, addr);
136
- uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
137
+ MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
138
+ uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
139
140
- if (likely(tlb_hit(tlb_addr, addr))) {
141
- /* We must have an iotlb entry for MMIO */
142
- if (tlb_addr & TLB_MMIO) {
143
- CPUTLBEntryFull *full;
144
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
145
- data->is_io = true;
146
- data->v.io.section =
147
- iotlb_to_section(cpu, full->xlat_section, full->attrs);
148
- data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
149
- } else {
150
- data->is_io = false;
151
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
152
- }
153
- return true;
154
- } else {
155
- SavedIOTLB *saved = &cpu->saved_iotlb;
156
- data->is_io = true;
157
- data->v.io.section = saved->section;
158
- data->v.io.offset = saved->mr_offset;
159
- return true;
160
+ if (unlikely(!tlb_hit(tlb_addr, addr))) {
161
+ return false;
162
}
163
-}
164
165
+ /* We must have an iotlb entry for MMIO */
166
+ if (tlb_addr & TLB_MMIO) {
167
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
168
+ hwaddr xlat = full->xlat_section;
169
+
170
+ data->is_io = true;
171
+ data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
172
+ data->v.io.section =
173
+ iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
174
+ } else {
175
+ data->is_io = false;
176
+ data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
177
+ }
178
+ return true;
179
+}
180
#endif
181
182
/*
183
--
53
--
184
2.34.1
54
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Reviewed-by: Song Gao <gaosong@loongson.cn>
3
Message-Id: <20230831030904.1194667-3-richard.henderson@linaro.org>
4
---
3
---
5
target/arm/tcg/translate.c | 56 ++++++--------------------------------
4
tcg/optimize.c | 2 +-
6
1 file changed, 9 insertions(+), 47 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
7
6
8
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
10
--- a/target/arm/tcg/translate.c
9
--- a/tcg/optimize.c
11
+++ b/target/arm/tcg/translate.c
10
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
13
gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
14
}
17
}
15
18
16
-#define GEN_CMP0(NAME, COND) \
19
/* Propagate constants and copies, fold constant expressions. */
17
- static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
18
- { \
19
- tcg_gen_negsetcond_i32(COND, d, a, tcg_constant_i32(0)); \
20
- } \
21
- static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
22
- { \
23
- tcg_gen_negsetcond_i64(COND, d, a, tcg_constant_i64(0)); \
24
- } \
25
- static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
26
- { \
27
- TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0); \
28
- tcg_gen_cmp_vec(COND, vece, d, a, zero); \
29
- } \
30
- void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
31
- uint32_t opr_sz, uint32_t max_sz) \
32
- { \
33
- const GVecGen2 op[4] = { \
34
- { .fno = gen_helper_gvec_##NAME##0_b, \
35
- .fniv = gen_##NAME##0_vec, \
36
- .opt_opc = vecop_list_cmp, \
37
- .vece = MO_8 }, \
38
- { .fno = gen_helper_gvec_##NAME##0_h, \
39
- .fniv = gen_##NAME##0_vec, \
40
- .opt_opc = vecop_list_cmp, \
41
- .vece = MO_16 }, \
42
- { .fni4 = gen_##NAME##0_i32, \
43
- .fniv = gen_##NAME##0_vec, \
44
- .opt_opc = vecop_list_cmp, \
45
- .vece = MO_32 }, \
46
- { .fni8 = gen_##NAME##0_i64, \
47
- .fniv = gen_##NAME##0_vec, \
48
- .opt_opc = vecop_list_cmp, \
49
- .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
50
- .vece = MO_64 }, \
51
- }; \
52
- tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
53
- }
54
+#define GEN_CMP0(NAME, COND) \
55
+ void NAME(unsigned vece, uint32_t d, uint32_t m, \
56
+ uint32_t opr_sz, uint32_t max_sz) \
57
+ { tcg_gen_gvec_cmpi(COND, vece, d, m, 0, opr_sz, max_sz); }
58
59
-static const TCGOpcode vecop_list_cmp[] = {
60
- INDEX_op_cmp_vec, 0
61
-};
62
-
63
-GEN_CMP0(ceq, TCG_COND_EQ)
64
-GEN_CMP0(cle, TCG_COND_LE)
65
-GEN_CMP0(cge, TCG_COND_GE)
66
-GEN_CMP0(clt, TCG_COND_LT)
67
-GEN_CMP0(cgt, TCG_COND_GT)
68
+GEN_CMP0(gen_gvec_ceq0, TCG_COND_EQ)
69
+GEN_CMP0(gen_gvec_cle0, TCG_COND_LE)
70
+GEN_CMP0(gen_gvec_cge0, TCG_COND_GE)
71
+GEN_CMP0(gen_gvec_clt0, TCG_COND_LT)
72
+GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT)
73
74
#undef GEN_CMP0
75
76
--
20
--
77
2.34.1
21
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
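The resulting convention, as a rough sketch (fold_example is a hypothetical helper, not from the patch): every fold_* routine either fully handles the op itself or ends by calling finish_folding(), so the dispatch loop only has to assert.

    static bool fold_example(OptContext *ctx, TCGOp *op)
    {
        if (fold_const2_commutative(ctx, op)) {
            return true;                  /* op already folded away */
        }
        return finish_folding(ctx, op);   /* keep op; reset output temps */
    }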
2
3
3
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230908022302.180442-16-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/loongarch64/tcg-target.h | 2 +-
7
tcg/optimize.c | 6 ++----
9
tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
8
1 file changed, 2 insertions(+), 4 deletions(-)
10
2 files changed, 22 insertions(+), 1 deletion(-)
11
9
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
12
--- a/tcg/optimize.c
15
+++ b/tcg/loongarch64/tcg-target.h
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
17
#define TCG_TARGET_HAS_shi_vec 1
15
done = true;
18
#define TCG_TARGET_HAS_shs_vec 0
16
break;
19
#define TCG_TARGET_HAS_shv_vec 1
17
default:
20
-#define TCG_TARGET_HAS_roti_vec 0
18
+ done = finish_folding(&ctx, op);
21
+#define TCG_TARGET_HAS_roti_vec 1
19
break;
22
#define TCG_TARGET_HAS_rots_vec 0
20
}
23
#define TCG_TARGET_HAS_rotv_vec 1
21
-
24
#define TCG_TARGET_HAS_sat_vec 1
22
- if (!done) {
25
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
23
- finish_folding(&ctx, op);
26
index XXXXXXX..XXXXXXX 100644
24
- }
27
--- a/tcg/loongarch64/tcg-target.c.inc
25
+ tcg_debug_assert(done);
28
+++ b/tcg/loongarch64/tcg-target.c.inc
26
}
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
27
}
30
tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
31
temp_vec));
32
break;
33
+ case INDEX_op_rotli_vec:
34
+ /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
35
+ a2 = extract32(-a2, 0, 3 + vece);
36
+ switch (vece) {
37
+ case MO_8:
38
+ tcg_out_opc_vrotri_b(s, a0, a1, a2);
39
+ break;
40
+ case MO_16:
41
+ tcg_out_opc_vrotri_h(s, a0, a1, a2);
42
+ break;
43
+ case MO_32:
44
+ tcg_out_opc_vrotri_w(s, a0, a1, a2);
45
+ break;
46
+ case MO_64:
47
+ tcg_out_opc_vrotri_d(s, a0, a1, a2);
48
+ break;
49
+ default:
50
+ g_assert_not_reached();
51
+ }
52
+ break;
53
case INDEX_op_bitsel_vec:
54
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
55
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
56
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
57
case INDEX_op_shli_vec:
58
case INDEX_op_shri_vec:
59
case INDEX_op_sari_vec:
60
+ case INDEX_op_rotli_vec:
61
return C_O1_I1(w, w);
62
63
case INDEX_op_bitsel_vec:
64
--
28
--
65
2.34.1
29
2.43.0
1
Rather than saving MemoryRegionSection and offset,
1
All mask setting is now done with parameters via fold_masks_*.
2
save phys_addr and MemoryRegion. This matches up
3
much more closely with the plugin API.
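For reference, the two accessors that sit on top of these fields, as a typical plugin would use them (a sketch; assumes a non-NULL handle hw obtained from qemu_plugin_get_hwaddr()):

    uint64_t pa     = qemu_plugin_hwaddr_phys_addr(hw);   /* now simply haddr->phys_addr */
    const char *dev = qemu_plugin_hwaddr_device_name(hw); /* MemoryRegion name, "anon..." or "RAM" */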
4
2
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
include/qemu/plugin-memory.h | 11 ++---------
6
tcg/optimize.c | 13 -------------
9
accel/tcg/cputlb.c | 16 +++++++++-------
7
1 file changed, 13 deletions(-)
10
plugins/api.c | 27 ++++++---------------------
11
3 files changed, 17 insertions(+), 37 deletions(-)
12
8
13
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/include/qemu/plugin-memory.h
11
--- a/tcg/optimize.c
16
+++ b/include/qemu/plugin-memory.h
12
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
18
struct qemu_plugin_hwaddr {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
19
bool is_io;
15
20
bool is_store;
16
/* In flight values from optimization. */
21
- union {
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
22
- struct {
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
23
- MemoryRegionSection *section;
19
TCGType type;
24
- hwaddr offset;
20
} OptContext;
25
- } io;
21
26
- struct {
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
27
- void *hostaddr;
23
for (i = 0; i < nb_oargs; i++) {
28
- } ram;
24
TCGTemp *ts = arg_temp(op->args[i]);
29
- } v;
25
reset_ts(ctx, ts);
30
+ hwaddr phys_addr;
26
- /*
31
+ MemoryRegion *mr;
27
- * Save the corresponding known-zero/sign bits mask for the
32
};
28
- * first output argument (only one supported so far).
33
29
- */
34
/**
30
- if (i == 0) {
35
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
31
- ts_info(ts)->z_mask = ctx->z_mask;
36
index XXXXXXX..XXXXXXX 100644
32
- }
37
--- a/accel/tcg/cputlb.c
38
+++ b/accel/tcg/cputlb.c
39
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
40
uintptr_t index = tlb_index(env, mmu_idx, addr);
41
MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
42
uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
43
+ CPUTLBEntryFull *full;
44
45
if (unlikely(!tlb_hit(tlb_addr, addr))) {
46
return false;
47
}
48
49
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
50
+ data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
51
+
52
/* We must have an iotlb entry for MMIO */
53
if (tlb_addr & TLB_MMIO) {
54
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
55
- hwaddr xlat = full->xlat_section;
56
-
57
+ MemoryRegionSection *section =
58
+ iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
59
+ full->attrs);
60
data->is_io = true;
61
- data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
62
- data->v.io.section =
63
- iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
64
+ data->mr = section->mr;
65
} else {
66
data->is_io = false;
67
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
68
+ data->mr = NULL;
69
}
33
}
70
return true;
34
return true;
71
}
35
}
72
diff --git a/plugins/api.c b/plugins/api.c
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
73
index XXXXXXX..XXXXXXX 100644
37
ctx.type = TCG_TYPE_I32;
74
--- a/plugins/api.c
38
}
75
+++ b/plugins/api.c
39
76
@@ -XXX,XX +XXX,XX @@ uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
77
{
41
- ctx.z_mask = -1;
78
#ifdef CONFIG_SOFTMMU
42
- ctx.s_mask = 0;
79
if (haddr) {
80
- if (!haddr->is_io) {
81
- RAMBlock *block;
82
- ram_addr_t offset;
83
- void *hostaddr = haddr->v.ram.hostaddr;
84
-
43
-
85
- block = qemu_ram_block_from_host(hostaddr, false, &offset);
44
/*
86
- if (!block) {
45
* Process each opcode.
87
- error_report("Bad host ram pointer %p", haddr->v.ram.hostaddr);
46
* Sorted alphabetically by opcode as much as possible.
88
- abort();
89
- }
90
-
91
- return block->offset + offset + block->mr->addr;
92
- } else {
93
- MemoryRegionSection *mrs = haddr->v.io.section;
94
- return mrs->offset_within_address_space + haddr->v.io.offset;
95
- }
96
+ return haddr->phys_addr;
97
}
98
#endif
99
return 0;
100
@@ -XXX,XX +XXX,XX @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
101
{
102
#ifdef CONFIG_SOFTMMU
103
if (h && h->is_io) {
104
- MemoryRegionSection *mrs = h->v.io.section;
105
- if (!mrs->mr->name) {
106
- unsigned long maddr = 0xffffffff & (uintptr_t) mrs->mr;
107
- g_autofree char *temp = g_strdup_printf("anon%08lx", maddr);
108
+ MemoryRegion *mr = h->mr;
109
+ if (!mr->name) {
110
+ unsigned maddr = (uintptr_t)mr;
111
+ g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
112
return g_intern_string(temp);
113
} else {
114
- return g_intern_string(mrs->mr->name);
115
+ return g_intern_string(mr->name);
116
}
117
} else {
118
return g_intern_static_string("RAM");
119
--
47
--
120
2.34.1
48
2.43.0
121
122
1
For a Linux aarch64 host supporting BTI, map the buffer
1
All instances of s_mask have been converted to the new
2
to require BTI instructions at branch landing pads.
2
representation. We can now re-enable usage.
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/region.c | 41 ++++++++++++++++++++++++++++++-----------
7
tcg/optimize.c | 4 ++--
8
1 file changed, 30 insertions(+), 11 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
9
9
10
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/region.c
12
--- a/tcg/optimize.c
13
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
15
#include "tcg/tcg.h"
15
g_assert_not_reached();
16
#include "exec/translation-block.h"
17
#include "tcg-internal.h"
18
+#include "host/cpuinfo.h"
19
20
21
+/*
22
+ * Local source-level compatibility with Unix.
23
+ * Used by tcg_region_init below.
24
+ */
25
+#if defined(_WIN32)
26
+#define PROT_READ 1
27
+#define PROT_WRITE 2
28
+#define PROT_EXEC 4
29
+#endif
30
+
31
struct tcg_region_tree {
32
QemuMutex lock;
33
QTree *tree;
34
@@ -XXX,XX +XXX,XX @@ bool in_code_gen_buffer(const void *p)
35
return (size_t)(p - region.start_aligned) <= region.total_size;
36
}
37
38
+#ifndef CONFIG_TCG_INTERPRETER
39
+static int host_prot_read_exec(void)
40
+{
41
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
42
+ if (cpuinfo & CPUINFO_BTI) {
43
+ return PROT_READ | PROT_EXEC | PROT_BTI;
44
+ }
45
+#endif
46
+ return PROT_READ | PROT_EXEC;
47
+}
48
+#endif
49
+
50
#ifdef CONFIG_DEBUG_TCG
51
const void *tcg_splitwx_to_rx(void *rw)
52
{
53
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
54
return PROT_READ | PROT_WRITE;
55
}
56
#elif defined(_WIN32)
57
-/*
58
- * Local source-level compatibility with Unix.
59
- * Used by tcg_region_init below.
60
- */
61
-#define PROT_READ 1
62
-#define PROT_WRITE 2
63
-#define PROT_EXEC 4
64
-
65
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
66
{
67
void *buf;
68
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
69
goto fail;
70
}
16
}
71
17
72
- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
73
+ buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
74
if (buf_rx == MAP_FAILED) {
20
return true;
75
goto fail_rx;
76
}
21
}
77
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
22
78
return -1;
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
79
}
30
}
80
31
81
- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
82
+ if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
83
error_setg_errno(errp, errno, "mprotect for jit splitwx");
84
munmap((void *)buf_rx, size);
85
munmap((void *)buf_rw, size);
86
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
87
need_prot = PROT_READ | PROT_WRITE;
88
#ifndef CONFIG_TCG_INTERPRETER
89
if (tcg_splitwx_diff == 0) {
90
- need_prot |= PROT_EXEC;
91
+ need_prot |= host_prot_read_exec();
92
}
93
#endif
94
for (size_t i = 0, n = region.n; i < n; i++) {
95
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
96
} else if (need_prot == (PROT_READ | PROT_WRITE)) {
97
rc = qemu_mprotect_rw(start, end - start);
98
} else {
99
+#ifdef CONFIG_POSIX
100
+ rc = mprotect(start, end - start, need_prot);
101
+#else
102
g_assert_not_reached();
103
+#endif
104
}
105
if (rc) {
106
error_setg_errno(&error_fatal, errno,
107
--
32
--
108
2.34.1
33
2.43.0
109
110
1
Avoid multiple calls to io_prepare for unaligned accesses.
1
The big comment just above says functions should be sorted.
2
One call to do_st_mmio_leN will never cross pages.
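The chunking rule used below picks the largest naturally aligned power-of-two piece that still fits; a small worked sketch of the arithmetic (mirroring the new loop in do_st_mmio_leN, not a separate implementation):

    int mop  = ctz32(size | (int)addr | 8);   /* the 8 caps a piece at 8 bytes */
    int step = 1 << mop;
    /*
     * Example: a 5-byte store at an address ending in ...3:
     *   ctz32(5 | 3 | 8) = 0  ->  1-byte piece, then
     *   ctz32(4 | 4 | 8) = 2  ->  4-byte piece, and the store is complete.
     */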
2
Add forward declarations as needed.
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
accel/tcg/cputlb.c | 82 +++++++++++++++++-----------------------------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
7
1 file changed, 30 insertions(+), 52 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
8
9
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/cputlb.c
12
--- a/tcg/optimize.c
12
+++ b/accel/tcg/cputlb.c
13
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
14
}
15
* 3) those that produce information about the result value.
16
*/
17
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
21
+
22
static bool fold_add(OptContext *ctx, TCGOp *op)
23
{
24
if (fold_const2_commutative(ctx, op) ||
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
15
}
27
}
16
28
17
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
18
- int mmu_idx, uint64_t val, vaddr addr,
30
+{
19
- uintptr_t retaddr, MemOp op)
31
+ /* If true and false values are the same, eliminate the cmp. */
32
+ if (args_are_copies(op->args[2], op->args[3])) {
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
34
+ }
35
+
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
37
+ uint64_t tv = arg_info(op->args[2])->val;
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
90
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
20
-{
92
-{
21
- MemoryRegionSection *section;
93
- /* If true and false values are the same, eliminate the cmp. */
22
- hwaddr mr_offset;
94
- if (args_are_copies(op->args[2], op->args[3])) {
23
- MemoryRegion *mr;
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
24
- MemTxResult r;
25
-
26
- section = io_prepare(&mr_offset, env, full->xlat_section,
27
- full->attrs, addr, retaddr);
28
- mr = section->mr;
29
-
30
- {
31
- QEMU_IOTHREAD_LOCK_GUARD();
32
- r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
33
- }
96
- }
34
-
97
-
35
- if (r != MEMTX_OK) {
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
36
- io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
99
- uint64_t tv = arg_info(op->args[2])->val;
37
- r, retaddr);
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
38
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
39
-}
144
-}
40
-
145
-
41
/* Return true if ADDR is present in the victim tlb, and has been copied
146
/* Propagate constants and copies, fold constant expressions. */
42
back to the main tlb. */
147
void tcg_optimize(TCGContext *s)
43
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
44
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
45
uint64_t val_le, vaddr addr, int size,
46
int mmu_idx, uintptr_t ra)
47
{
148
{
48
+ MemoryRegionSection *section;
49
+ hwaddr mr_offset;
50
+ MemoryRegion *mr;
51
+ MemTxAttrs attrs;
52
+
53
tcg_debug_assert(size > 0 && size <= 8);
54
55
+ attrs = full->attrs;
56
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
57
+ mr = section->mr;
58
+
59
do {
60
+ MemOp this_mop;
61
+ unsigned this_size;
62
+ MemTxResult r;
63
+
64
/* Store aligned pieces up to 8 bytes. */
65
- switch ((size | (int)addr) & 7) {
66
- case 1:
67
- case 3:
68
- case 5:
69
- case 7:
70
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
71
- val_le >>= 8;
72
- size -= 1;
73
- addr += 1;
74
- break;
75
- case 2:
76
- case 6:
77
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
78
- val_le >>= 16;
79
- size -= 2;
80
- addr += 2;
81
- break;
82
- case 4:
83
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
84
- val_le >>= 32;
85
- size -= 4;
86
- addr += 4;
87
- break;
88
- case 0:
89
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
90
- return 0;
91
- default:
92
- qemu_build_not_reached();
93
+ this_mop = ctz32(size | (int)addr | 8);
94
+ this_size = 1 << this_mop;
95
+ this_mop |= MO_LE;
96
+
97
+ r = memory_region_dispatch_write(mr, mr_offset, val_le,
98
+ this_mop, attrs);
99
+ if (unlikely(r != MEMTX_OK)) {
100
+ io_failed(env, full, addr, this_size, MMU_DATA_STORE,
101
+ mmu_idx, r, ra);
102
}
103
+ if (this_size == 8) {
104
+ return 0;
105
+ }
106
+
107
+ val_le >>= this_size * 8;
108
+ addr += this_size;
109
+ mr_offset += this_size;
110
+ size -= this_size;
111
} while (size);
112
113
return val_le;
114
--
149
--
115
2.34.1
150
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
The big comment just above says functions should be sorted.
2
2
3
If LSX is available, use LSX instructions to implement 128-bit load &
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
store when MO_128 is required; otherwise, use two 64-bit loads & stores.
5
6
Signed-off-by: Jiajie Chen <c@jia.je>
7
Message-Id: <20230908022302.180442-17-c@jia.je>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
5
---
11
tcg/loongarch64/tcg-target-con-set.h | 2 +
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
12
tcg/loongarch64/tcg-target.h | 2 +-
7
1 file changed, 30 insertions(+), 30 deletions(-)
13
tcg/loongarch64/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++
14
3 files changed, 66 insertions(+), 1 deletion(-)
15
8
16
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/loongarch64/tcg-target-con-set.h
11
--- a/tcg/optimize.c
19
+++ b/tcg/loongarch64/tcg-target-con-set.h
12
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
21
C_O0_I2(rZ, r)
14
return true;
22
C_O0_I2(rZ, rZ)
23
C_O0_I2(w, r)
24
+C_O0_I3(r, r, r)
25
C_O1_I1(r, r)
26
C_O1_I1(w, r)
27
C_O1_I1(w, w)
28
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wM)
29
C_O1_I2(w, w, wA)
30
C_O1_I3(w, w, w, w)
31
C_O1_I4(r, rZ, rJ, rZ, rZ)
32
+C_O2_I1(r, r, r)
33
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/loongarch64/tcg-target.h
36
+++ b/tcg/loongarch64/tcg-target.h
37
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
38
#define TCG_TARGET_HAS_muluh_i64 1
39
#define TCG_TARGET_HAS_mulsh_i64 1
40
41
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
42
+#define TCG_TARGET_HAS_qemu_ldst_i128 use_lsx_instructions
43
44
#define TCG_TARGET_HAS_v64 0
45
#define TCG_TARGET_HAS_v128 use_lsx_instructions
46
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/tcg/loongarch64/tcg-target.c.inc
49
+++ b/tcg/loongarch64/tcg-target.c.inc
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
51
}
52
}
15
}
53
16
54
+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
55
+ TCGReg addr_reg, MemOpIdx oi, bool is_ld)
56
+{
18
+{
57
+ TCGLabelQemuLdst *ldst;
19
+ /* Canonicalize the comparison to put immediate second. */
58
+ HostAddress h;
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
24
+}
59
+
25
+
60
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
61
+
27
+{
62
+ if (h.aa.atom == MO_128) {
28
+ /* If true and false values are the same, eliminate the cmp. */
63
+ /*
29
+ if (args_are_copies(op->args[3], op->args[4])) {
64
+ * Use VLDX/VSTX when 128-bit atomicity is required.
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
65
+ * If address is aligned to 16-bytes, the 128-bit load/store is atomic.
66
+ */
67
+ if (is_ld) {
68
+ tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
69
+ tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
70
+ tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
71
+ } else {
72
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
73
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
74
+ tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
75
+ }
76
+ } else {
77
+ /* Otherwise use a pair of LD/ST. */
78
+ TCGReg base = h.base;
79
+ if (h.index != TCG_REG_ZERO) {
80
+ base = TCG_REG_TMP0;
81
+ tcg_out_opc_add_d(s, base, h.base, h.index);
82
+ }
83
+ if (is_ld) {
84
+ tcg_out_opc_ld_d(s, data_lo, base, 0);
85
+ tcg_out_opc_ld_d(s, data_hi, base, 8);
86
+ } else {
87
+ tcg_out_opc_st_d(s, data_lo, base, 0);
88
+ tcg_out_opc_st_d(s, data_hi, base, 8);
89
+ }
90
+ }
31
+ }
91
+
32
+
92
+ if (ldst) {
33
+ /* Canonicalize the comparison to put immediate second. */
93
+ ldst->type = TCG_TYPE_I128;
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
94
+ ldst->datalo_reg = data_lo;
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
95
+ ldst->datahi_reg = data_hi;
96
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
97
+ }
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
98
+}
45
+}
99
+
46
+
100
/*
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
101
* Entry-points
48
{
102
*/
49
uint64_t z_mask, s_mask;
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
104
TCGArg a0 = args[0];
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
105
TCGArg a1 = args[1];
52
}
106
TCGArg a2 = args[2];
53
107
+ TCGArg a3 = args[3];
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
108
int c2 = const_args[2];
55
-{
109
56
- /* Canonicalize the comparison to put immediate second. */
110
switch (opc) {
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
111
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
58
- op->args[3] = tcg_swap_cond(op->args[3]);
112
case INDEX_op_qemu_ld_a64_i64:
59
- }
113
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
60
- return finish_folding(ctx, op);
114
break;
61
-}
115
+ case INDEX_op_qemu_ld_a32_i128:
62
-
116
+ case INDEX_op_qemu_ld_a64_i128:
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
117
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
64
-{
118
+ break;
65
- /* If true and false values are the same, eliminate the cmp. */
119
case INDEX_op_qemu_st_a32_i32:
66
- if (args_are_copies(op->args[3], op->args[4])) {
120
case INDEX_op_qemu_st_a64_i32:
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
121
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
68
- }
122
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
69
-
123
case INDEX_op_qemu_st_a64_i64:
70
- /* Canonicalize the comparison to put immediate second. */
124
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
125
break;
72
- op->args[5] = tcg_swap_cond(op->args[5]);
126
+ case INDEX_op_qemu_st_a32_i128:
73
- }
127
+ case INDEX_op_qemu_st_a64_i128:
74
- /*
128
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
75
- * Canonicalize the "false" input reg to match the destination,
129
+ break;
76
- * so that the tcg backend can implement "move if true".
130
77
- */
131
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
132
case INDEX_op_mov_i64:
79
- op->args[5] = tcg_invert_cond(op->args[5]);
133
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
80
- }
134
case INDEX_op_qemu_st_a64_i64:
81
- return finish_folding(ctx, op);
135
return C_O0_I2(rZ, r);
82
-}
136
83
-
137
+ case INDEX_op_qemu_ld_a32_i128:
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
138
+ case INDEX_op_qemu_ld_a64_i128:
85
{
139
+ return C_O2_I1(r, r, r);
86
uint64_t z_mask, s_mask, s_mask_old;
140
+
141
+ case INDEX_op_qemu_st_a32_i128:
142
+ case INDEX_op_qemu_st_a64_i128:
143
+ return C_O0_I3(r, r, r);
144
+
145
case INDEX_op_brcond_i32:
146
case INDEX_op_brcond_i64:
147
return C_O0_I2(rZ, rZ);
148
--
87
--
149
2.34.1
88
2.43.0
1
Factor out the code common to io_readx and io_writex.
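Condensed from the io_readx/io_writex bodies below, the call sequence both now share (a sketch of the read side only):

    section = io_prepare(&mr_offset, env, full->xlat_section,
                         full->attrs, addr, retaddr);
    r = memory_region_dispatch_read(section->mr, mr_offset, &val, op, full->attrs);
    if (r != MEMTX_OK) {
        io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
                  r, retaddr, section, mr_offset);
    }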
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
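A before/after sketch of the intended use; the scale = -1 case is how later patches in this series replace the flag (variable names are illustrative):

    /* Before: the result of a*b + c is halved via a flag. */
    r = float64_muladd(a, b, c, float_muladd_halve_result, status);

    /* After: the result is scaled by 2**scale; scale = -1 reproduces the halving. */
    r = float64_muladd_scalbn(a, b, c, -1, 0, status);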
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
accel/tcg/cputlb.c | 77 +++++++++++++++++++++++++++-------------------
7
include/fpu/softfloat.h | 6 ++++
7
1 file changed, 45 insertions(+), 32 deletions(-)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
8
9
fpu/softfloat-parts.c.inc | 7 +++--
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
10
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/cputlb.c
14
--- a/include/fpu/softfloat.h
12
+++ b/accel/tcg/cputlb.c
15
+++ b/include/fpu/softfloat.h
13
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
14
* (non-page-aligned) vaddr of the eventual memory access to get
17
float16 float16_sub(float16, float16, float_status *status);
15
* the MemoryRegion offset for the access. Note that the vaddr we
18
float16 float16_mul(float16, float16, float_status *status);
16
* subtract here is that of the page base, and not the same as the
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
17
- * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
20
+float16 float16_muladd_scalbn(float16, float16, float16,
18
+ * vaddr we add back in io_prepare()/get_page_addr_code().
21
+ int, int, float_status *status);
19
*/
22
float16 float16_div(float16, float16, float_status *status);
20
desc->fulltlb[index] = *full;
23
float16 float16_scalbn(float16, int, float_status *status);
21
full = &desc->fulltlb[index];
24
float16 float16_min(float16, float16, float_status *status);
22
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
23
}
26
float32 float32_div(float32, float32, float_status *status);
24
}
27
float32 float32_rem(float32, float32, float_status *status);
25
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
26
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
29
+float32 float32_muladd_scalbn(float32, float32, float32,
27
- int mmu_idx, vaddr addr, uintptr_t retaddr,
30
+ int, int, float_status *status);
28
- MMUAccessType access_type, MemOp op)
31
float32 float32_sqrt(float32, float_status *status);
29
+static MemoryRegionSection *
32
float32 float32_exp2(float32, float_status *status);
30
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
33
float32 float32_log2(float32, float_status *status);
31
+ MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
32
{
35
float64 float64_div(float64, float64, float_status *status);
33
CPUState *cpu = env_cpu(env);
36
float64 float64_rem(float64, float64, float_status *status);
34
- hwaddr mr_offset;
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
35
MemoryRegionSection *section;
38
+float64 float64_muladd_scalbn(float64, float64, float64,
36
- MemoryRegion *mr;
39
+ int, int, float_status *status);
37
- uint64_t val;
40
float64 float64_sqrt(float64, float_status *status);
38
- MemTxResult r;
41
float64 float64_log2(float64, float_status *status);
39
+ hwaddr mr_offset;
42
FloatRelation float64_compare(float64, float64, float_status *status);
40
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
41
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
44
index XXXXXXX..XXXXXXX 100644
42
- mr = section->mr;
45
--- a/fpu/softfloat.c
43
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
46
+++ b/fpu/softfloat.c
44
+ section = iotlb_to_section(cpu, xlat, attrs);
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
45
+ mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
48
#define parts_mul(A, B, S) \
46
cpu->mem_io_pc = retaddr;
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
47
if (!cpu->can_do_io) {
50
48
cpu_io_recompile(cpu, retaddr);
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
49
}
52
- FloatParts64 *c, int flags,
50
53
- float_status *s);
51
+ *out_offset = mr_offset;
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
52
+ return section;
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
53
+}
99
+}
54
+
100
+
55
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
101
+float32 QEMU_SOFTFLOAT_ATTR
56
+ unsigned size, MMUAccessType access_type, int mmu_idx,
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
57
+ MemTxResult response, uintptr_t retaddr,
103
+ int scale, int flags, float_status *status)
58
+ MemoryRegionSection *section, hwaddr mr_offset)
104
{
59
+{
105
FloatParts64 pa, pb, pc, *pr;
60
+ hwaddr physaddr = (mr_offset +
106
61
+ section->offset_within_address_space -
107
float32_unpack_canonical(&pa, a, status);
62
+ section->offset_within_region);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
63
+
182
+
64
+ cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
65
+ mmu_idx, full->attrs, response, retaddr);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
66
+}
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
67
+
186
xnp = *parts_mul(&xnp, &xp, status);
68
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
69
+ int mmu_idx, vaddr addr, uintptr_t retaddr,
70
+ MMUAccessType access_type, MemOp op)
71
+{
72
+ MemoryRegionSection *section;
73
+ hwaddr mr_offset;
74
+ MemoryRegion *mr;
75
+ MemTxResult r;
76
+ uint64_t val;
77
+
78
+ section = io_prepare(&mr_offset, env, full->xlat_section,
79
+ full->attrs, addr, retaddr);
80
+ mr = section->mr;
81
+
82
{
83
QEMU_IOTHREAD_LOCK_GUARD();
84
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
85
}
187
}
86
188
87
if (r != MEMTX_OK) {
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
88
- hwaddr physaddr = mr_offset +
190
index XXXXXXX..XXXXXXX 100644
89
- section->offset_within_address_space -
191
--- a/fpu/softfloat-parts.c.inc
90
- section->offset_within_region;
192
+++ b/fpu/softfloat-parts.c.inc
91
-
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
92
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
194
* Requires A and C extracted into a double-sized structure to provide the
93
- mmu_idx, full->attrs, r, retaddr);
195
* extra space for the widening multiply.
94
+ io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
196
*/
95
+ r, retaddr, section, mr_offset);
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
96
}
212
}
97
return val;
213
+ a->exp += scale;
98
}
214
finish_sign:
99
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
215
if (flags & float_muladd_negate_result) {
100
int mmu_idx, uint64_t val, vaddr addr,
216
a->sign ^= 1;
101
uintptr_t retaddr, MemOp op)
102
{
103
- CPUState *cpu = env_cpu(env);
104
- hwaddr mr_offset;
105
MemoryRegionSection *section;
106
+ hwaddr mr_offset;
107
MemoryRegion *mr;
108
MemTxResult r;
109
110
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
111
+ section = io_prepare(&mr_offset, env, full->xlat_section,
112
+ full->attrs, addr, retaddr);
113
mr = section->mr;
114
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
115
- if (!cpu->can_do_io) {
116
- cpu_io_recompile(cpu, retaddr);
117
- }
118
- cpu->mem_io_pc = retaddr;
119
120
{
121
QEMU_IOTHREAD_LOCK_GUARD();
122
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
123
}
124
125
if (r != MEMTX_OK) {
126
- hwaddr physaddr = mr_offset +
127
- section->offset_within_address_space -
128
- section->offset_within_region;
129
-
130
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
131
- MMU_DATA_STORE, mmu_idx, full->attrs, r,
132
- retaddr);
133
+ io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
134
+ r, retaddr, section, mr_offset);
135
}
136
}
137
138
--
217
--
139
2.34.1
218
2.43.0
140
219
141
220
1
Push computation down into the if statements to the point
1
Use the scalbn interface instead of float_muladd_halve_result.
2
where the data is used.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/cputlb.c | 33 +++++++++++++--------------------
6
target/arm/tcg/helper-a64.c | 6 +++---
8
1 file changed, 13 insertions(+), 20 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
9
8
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
11
--- a/target/arm/tcg/helper-a64.c
13
+++ b/accel/tcg/cputlb.c
12
+++ b/target/arm/tcg/helper-a64.c
14
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
15
mmu_idx, retaddr);
14
(float16_is_infinity(b) && float16_is_zero(a))) {
15
return float16_one_point_five;
16
}
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
16
}
19
}
17
20
18
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
19
- vaddr addr, unsigned size,
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
20
- MMUAccessType access_type,
23
(float32_is_infinity(b) && float32_is_zero(a))) {
21
- int mmu_idx, MemTxAttrs attrs,
24
return float32_one_point_five;
22
- MemTxResult response,
25
}
23
- uintptr_t retaddr)
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
24
-{
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
25
- CPUClass *cc = CPU_GET_CLASS(cpu);
26
-
27
- if (!cpu->ignore_memory_transaction_failures &&
28
- cc->tcg_ops->do_transaction_failed) {
29
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
30
- access_type, mmu_idx, attrs,
31
- response, retaddr);
32
- }
33
-}
34
-
35
static MemoryRegionSection *
36
io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
37
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
38
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
39
unsigned size, MMUAccessType access_type, int mmu_idx,
40
MemTxResult response, uintptr_t retaddr)
41
{
42
- hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
43
- cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
44
- mmu_idx, full->attrs, response, retaddr);
45
+ CPUState *cpu = env_cpu(env);
46
+
47
+ if (!cpu->ignore_memory_transaction_failures) {
48
+ CPUClass *cc = CPU_GET_CLASS(cpu);
49
+
50
+ if (cc->tcg_ops->do_transaction_failed) {
51
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
52
+
53
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
54
+ access_type, mmu_idx,
55
+ full->attrs, response, retaddr);
56
+ }
57
+ }
58
}
28
}
59
29
60
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
33
return float64_one_point_five;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
61
--
40
--
62
2.34.1
41
2.43.0
63
42
64
43
1
Use the scalbn interface instead of float_muladd_halve_result.
2
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
host/include/aarch64/host/cpuinfo.h | 1 +
6
target/sparc/helper.h | 4 +-
5
util/cpuinfo-aarch64.c | 7 +++++++
7
target/sparc/fop_helper.c | 8 ++--
6
2 files changed, 8 insertions(+)
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
7
9
3 files changed, 54 insertions(+), 38 deletions(-)
8
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/host/include/aarch64/host/cpuinfo.h
13
--- a/target/sparc/helper.h
11
+++ b/host/include/aarch64/host/cpuinfo.h
14
+++ b/target/sparc/helper.h
12
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
13
#define CPUINFO_LSE (1u << 1)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
14
#define CPUINFO_LSE2 (1u << 2)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
15
#define CPUINFO_AES (1u << 3)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
+#define CPUINFO_BTI (1u << 4)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
17
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
18
/* Initialized with a constructor. */
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
extern unsigned cpuinfo;
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
20
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
21
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
22
--- a/util/cpuinfo-aarch64.c
35
--- a/target/sparc/fop_helper.c
23
+++ b/util/cpuinfo-aarch64.c
36
+++ b/target/sparc/fop_helper.c
24
@@ -XXX,XX +XXX,XX @@
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
25
# include <asm/hwcap.h>
38
}
26
# include "elf.h"
39
27
# endif
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
28
+# ifndef HWCAP2_BTI
41
- float32 s2, float32 s3, uint32_t op)
29
+# define HWCAP2_BTI 0 /* added in glibc 2.32 */
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
30
+# endif
43
{
31
#endif
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
32
#ifdef CONFIG_DARWIN
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
33
# include <sys/sysctl.h>
46
check_ieee_exceptions(env, GETPC());
34
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
47
return ret;
35
info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
48
}
36
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
49
37
info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
38
+
51
- float64 s2, float64 s3, uint32_t op)
39
+ unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
40
+ info |= (hwcap2 & HWCAP2_BTI ? CPUINFO_BTI : 0);
53
{
41
#endif
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
42
#ifdef CONFIG_DARWIN
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
43
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
56
check_ieee_exceptions(env, GETPC());
44
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
57
return ret;
45
info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
58
}
46
+ info |= sysctl_for_bool("hw.optional.arm.FEAT_BTI") * CPUINFO_BTI;
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
47
#endif
60
index XXXXXXX..XXXXXXX 100644
48
61
--- a/target/sparc/translate.c
49
cpuinfo = info;
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
50
--
205
--
51
2.34.1
206
2.43.0
52
207
53
208
1
Motorola treats denormals with the explicit integer bit set as
1
All uses have been converted to float*_muladd_scalbn.
2
having unbiased exponent 0, unlike Intel, which treats them as
3
having unbiased exponent 1 (more like all other IEEE formats
4
that have no explicit integer bit).
5
2
6
Add a flag on FloatFmt to differentiate the behaviour.
7
8
Reported-by: Keith Packard <keithp@keithp.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
5
---
12
fpu/softfloat.c | 9 +++++-
6
include/fpu/softfloat.h | 3 ---
13
tests/tcg/m68k/denormal.c | 53 ++++++++++++++++++++++++++++++++++
7
fpu/softfloat.c | 6 ------
14
fpu/softfloat-parts.c.inc | 7 +++--
8
fpu/softfloat-parts.c.inc | 4 ----
15
tests/tcg/m68k/Makefile.target | 2 +-
9
3 files changed, 13 deletions(-)
16
4 files changed, 66 insertions(+), 5 deletions(-)
17
create mode 100644 tests/tcg/m68k/denormal.c
18
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
-| We also support halving the result before rounding, as a special
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
21
*----------------------------------------------------------------------------*/
22
enum {
23
float_muladd_negate_c = 1,
24
float_muladd_negate_product = 2,
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
27
};
28
29
/*----------------------------------------------------------------------------
19
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
20
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
21
--- a/fpu/softfloat.c
32
--- a/fpu/softfloat.c
22
+++ b/fpu/softfloat.c
33
+++ b/fpu/softfloat.c
23
@@ -XXX,XX +XXX,XX @@ typedef struct {
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
24
* round_mask: bits below lsb which must be rounded
35
if (unlikely(!can_use_fpu(s))) {
25
* The following optional modifiers are available:
36
goto soft;
26
* arm_althp: handle ARM Alternative Half Precision
37
}
27
+ * m68k_denormal: explicit integer bit for extended precision may be 1
38
- if (unlikely(flags & float_muladd_halve_result)) {
28
*/
39
- goto soft;
29
typedef struct {
40
- }
30
int exp_size;
41
31
@@ -XXX,XX +XXX,XX @@ typedef struct {
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
32
int frac_size;
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
33
int frac_shift;
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
34
bool arm_althp;
45
if (unlikely(!can_use_fpu(s))) {
35
+ bool m68k_denormal;
46
goto soft;
36
uint64_t round_mask;
47
}
37
} FloatFmt;
48
- if (unlikely(flags & float_muladd_halve_result)) {
38
49
- goto soft;
39
@@ -XXX,XX +XXX,XX @@ static const FloatFmt float128_params = {
50
- }
40
static const FloatFmt floatx80_params[3] = {
51
41
[floatx80_precision_s] = { FLOATX80_PARAMS(23) },
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
42
[floatx80_precision_d] = { FLOATX80_PARAMS(52) },
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
43
- [floatx80_precision_x] = { FLOATX80_PARAMS(64) },
44
+ [floatx80_precision_x] = {
45
+ FLOATX80_PARAMS(64),
46
+#ifdef TARGET_M68K
47
+ .m68k_denormal = true,
48
+#endif
49
+ },
50
};
51
52
/* Unpack a float to parts, but do not canonicalize. */
53
diff --git a/tests/tcg/m68k/denormal.c b/tests/tcg/m68k/denormal.c
54
new file mode 100644
55
index XXXXXXX..XXXXXXX
56
--- /dev/null
57
+++ b/tests/tcg/m68k/denormal.c
58
@@ -XXX,XX +XXX,XX @@
59
+/*
60
+ * Test m68k extended double denormals.
61
+ */
62
+
63
+#include <stdio.h>
64
+#include <stdint.h>
65
+
66
+#define TEST(X, Y) { X, Y, X * Y }
67
+
68
+static volatile long double test[][3] = {
69
+ TEST(0x1p+16383l, 0x1p-16446l),
70
+ TEST(0x1.1p-8223l, 0x1.1p-8224l),
71
+ TEST(1.0l, 0x1p-16383l),
72
+};
73
+
74
+#undef TEST
75
+
76
+static void dump_ld(const char *label, long double ld)
77
+{
78
+ union {
79
+ long double d;
80
+ struct {
81
+ uint32_t exp:16;
82
+ uint32_t space:16;
83
+ uint32_t h;
84
+ uint32_t l;
85
+ };
86
+ } u;
87
+
88
+ u.d = ld;
89
+ printf("%12s: % -27La 0x%04x 0x%08x 0x%08x\n", label, u.d, u.exp, u.h, u.l);
90
+}
91
+
92
+int main(void)
93
+{
94
+ int i, n = sizeof(test) / sizeof(test[0]), err = 0;
95
+
96
+ for (i = 0; i < n; ++i) {
97
+ long double x = test[i][0];
98
+ long double y = test[i][1];
99
+ long double build_mul = test[i][2];
100
+ long double runtime_mul = x * y;
101
+
102
+ if (runtime_mul != build_mul) {
103
+ dump_ld("x", x);
104
+ dump_ld("y", y);
105
+ dump_ld("build_mul", build_mul);
106
+ dump_ld("runtime_mul", runtime_mul);
107
+ err = 1;
108
+ }
109
+ }
110
+ return err;
111
+}
112
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
113
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
114
--- a/fpu/softfloat-parts.c.inc
56
--- a/fpu/softfloat-parts.c.inc
115
+++ b/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
116
@@ -XXX,XX +XXX,XX @@ static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
117
} else {
59
a->exp = p_widen.exp;
118
int shift = frac_normalize(p);
60
119
p->cls = float_class_normal;
61
return_normal:
120
- p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
121
+ p->exp = fmt->frac_shift - fmt->exp_bias
63
- if (flags & float_muladd_halve_result) {
122
+ - shift + !fmt->m68k_denormal;
64
- a->exp -= 1;
123
}
65
- }
124
} else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
66
a->exp += scale;
125
p->cls = float_class_normal;
67
finish_sign:
126
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
68
if (flags & float_muladd_negate_result) {
127
is_tiny = !frac_addi(&discard, p, inc);
128
}
129
130
- frac_shrjam(p, 1 - exp);
131
+ frac_shrjam(p, !fmt->m68k_denormal - exp);
132
133
if (p->frac_lo & round_mask) {
134
/* Need to recompute round-to-even/round-to-odd. */
135
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
136
p->frac_lo &= ~round_mask;
137
}
138
139
- exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) != 0;
140
+ exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal;
141
frac_shr(p, frac_shift);
142
143
if (is_tiny && (flags & float_flag_inexact)) {
144
diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tests/tcg/m68k/Makefile.target
147
+++ b/tests/tcg/m68k/Makefile.target
148
@@ -XXX,XX +XXX,XX @@
149
#
150
151
VPATH += $(SRC_PATH)/tests/tcg/m68k
152
-TESTS += trap
153
+TESTS += trap denormal
154
155
# On m68k Linux supports 4k and 8k pages (but 8k is currently broken)
156
EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192
157
--
69
--
158
2.34.1
70
2.43.0
159
71
160
72
1
From: Jiajie Chen <c@jia.je>
1
This rounding mode is used by Hexagon.
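(Illustrative sketch, not part of the patch, assuming QEMU's softfloat headers: with
this mode a result that would overflow under nearest-even saturates to the largest
finite value instead of becoming infinity, e.g.:

    float_status st = { };   /* simplified init for the sketch */
    float32 big = make_float32(0x7f7fffff);   /* largest finite float32 */

    set_float_rounding_mode(float_round_nearest_even_max, &st);
    float32 r = float32_add(big, big, &st);
    /* r stays at the maximum normal rather than overflowing to +inf;
     * the overflow/inexact flags are still raised as usual. */
)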
2
2
3
Lower the following ops:
4
5
- rotrv_vec
6
- rotlv_vec
7
8
Signed-off-by: Jiajie Chen <c@jia.je>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20230908022302.180442-15-c@jia.je>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
4
---
13
tcg/loongarch64/tcg-target.h | 2 +-
5
include/fpu/softfloat-types.h | 2 ++
14
tcg/loongarch64/tcg-target.c.inc | 14 ++++++++++++++
6
fpu/softfloat-parts.c.inc | 3 +++
15
2 files changed, 15 insertions(+), 1 deletion(-)
7
2 files changed, 5 insertions(+)
16
8
17
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
18
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/loongarch64/tcg-target.h
11
--- a/include/fpu/softfloat-types.h
20
+++ b/tcg/loongarch64/tcg-target.h
12
+++ b/include/fpu/softfloat-types.h
21
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
22
#define TCG_TARGET_HAS_shv_vec 1
14
float_round_to_odd = 5,
23
#define TCG_TARGET_HAS_roti_vec 0
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
24
#define TCG_TARGET_HAS_rots_vec 0
16
float_round_to_odd_inf = 6,
25
-#define TCG_TARGET_HAS_rotv_vec 0
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
26
+#define TCG_TARGET_HAS_rotv_vec 1
18
+ float_round_nearest_even_max = 7,
27
#define TCG_TARGET_HAS_sat_vec 1
19
} FloatRoundMode;
28
#define TCG_TARGET_HAS_minmax_vec 1
20
29
#define TCG_TARGET_HAS_bitsel_vec 1
21
/*
30
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
31
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/loongarch64/tcg-target.c.inc
24
--- a/fpu/softfloat-parts.c.inc
33
+++ b/tcg/loongarch64/tcg-target.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
35
static const LoongArchInsn sari_vec_insn[4] = {
27
int exp, flags = 0;
36
OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
28
37
};
29
switch (s->float_rounding_mode) {
38
+ static const LoongArchInsn rotrv_vec_insn[4] = {
30
+ case float_round_nearest_even_max:
39
+ OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
31
+ overflow_norm = true;
40
+ };
32
+ /* fall through */
41
33
case float_round_nearest_even:
42
a0 = args[0];
34
if (N > 64 && frac_lsb == 0) {
43
a1 = args[1];
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
45
case INDEX_op_sari_vec:
46
tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
47
break;
48
+ case INDEX_op_rotrv_vec:
49
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
50
+ break;
51
+ case INDEX_op_rotlv_vec:
52
+ /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
53
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
54
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
55
+ temp_vec));
56
+ break;
57
case INDEX_op_bitsel_vec:
58
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
59
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
60
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
61
case INDEX_op_shlv_vec:
62
case INDEX_op_shrv_vec:
63
case INDEX_op_sarv_vec:
64
+ case INDEX_op_rotrv_vec:
65
+ case INDEX_op_rotlv_vec:
66
return C_O1_I2(w, w, w);
67
68
case INDEX_op_not_vec:
69
--
36
--
70
2.34.1
37
2.43.0
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
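(Illustrative sketch, not part of the patch, assuming QEMU's softfloat headers: when
a or b is zero so that the product is a true zero, the flag added here makes muladd
return c unchanged, preserving the sign of a zero c instead of applying the usual
(+0) + (-0) = +0 rule:

    float_status st = { };
    float32 pz = float32_zero;                /* +0.0f */
    float32 mz = make_float32(0x80000000);    /* -0.0f */

    /* (+0 * 1) + (-0) */
    float32 plain = float32_muladd(pz, float32_one, mz, 0, &st);
    float32 kept  = float32_muladd(pz, float32_one, mz,
                                   float_muladd_suppress_add_product_zero, &st);
    /* plain == +0.0 (nearest-even sum of +0 and -0), kept == -0.0 */
)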
2
3
3
We missed these functions when upstreaming the bfloat16 support.
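(Quick usage sketch, not part of the patch, assuming QEMU's softfloat headers; the
prototypes are in the hunk below:

    float_status st = { };
    bfloat16 bf = int8_to_bfloat16(-5, &st);                  /* exact in bfloat16 */
    int8_t   si = bfloat16_to_int8(bf, &st);                  /* -5 */
    uint8_t  ui = bfloat16_to_uint8_round_to_zero(bf, &st);   /* negative input saturates to 0 */
)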
4
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
6
Message-Id: <20230531065458.2082-1-zhiwei_liu@linux.alibaba.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
include/fpu/softfloat.h | 12 +++++++++
6
include/fpu/softfloat.h | 5 +++++
10
fpu/softfloat.c | 58 +++++++++++++++++++++++++++++++++++++++++
7
fpu/softfloat.c | 3 +++
11
2 files changed, 70 insertions(+)
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
12
10
13
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/include/fpu/softfloat.h
13
--- a/include/fpu/softfloat.h
16
+++ b/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
17
@@ -XXX,XX +XXX,XX @@ float32 bfloat16_to_float32(bfloat16, float_status *status);
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
18
bfloat16 float64_to_bfloat16(float64 a, float_status *status);
16
| Using these differs from negating an input or output before calling
19
float64 bfloat16_to_float64(bfloat16 a, float_status *status);
17
| the muladd function in that this means that a NaN doesn't have its
20
18
| sign bit inverted before it is propagated.
21
+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
19
+|
22
+ int, float_status *status);
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
23
int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
21
+| such that the product is a true zero, then return C without addition.
24
int, float_status *status);
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
25
int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
23
*----------------------------------------------------------------------------*/
26
@@ -XXX,XX +XXX,XX @@ int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
24
enum {
27
int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
25
float_muladd_negate_c = 1,
28
int, float_status *status);
26
float_muladd_negate_product = 2,
29
27
float_muladd_negate_result = 4,
30
+int8_t bfloat16_to_int8(bfloat16, float_status *status);
28
+ float_muladd_suppress_add_product_zero = 8,
31
int16_t bfloat16_to_int16(bfloat16, float_status *status);
29
};
32
int32_t bfloat16_to_int32(bfloat16, float_status *status);
30
33
int64_t bfloat16_to_int64(bfloat16, float_status *status);
31
/*----------------------------------------------------------------------------
34
35
+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
36
int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
37
int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
38
int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);
39
40
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
41
+ int, float_status *status);
42
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
43
int, float_status *status);
44
uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
45
@@ -XXX,XX +XXX,XX @@ uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
46
uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
47
int, float_status *status);
48
49
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
50
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
51
uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
52
uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);
53
54
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
55
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
56
uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
57
uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);
58
59
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
60
bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
61
bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
62
bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
63
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
64
bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
65
bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
66
bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);
67
68
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
69
bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
70
bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
71
bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
72
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
73
bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
74
bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
75
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
76
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
77
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
78
--- a/fpu/softfloat.c
34
--- a/fpu/softfloat.c
79
+++ b/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
80
@@ -XXX,XX +XXX,XX @@ int64_t float64_to_int64_scalbn(float64 a, FloatRoundMode rmode, int scale,
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
81
return parts_float_to_sint(&p, rmode, scale, INT64_MIN, INT64_MAX, s);
37
if (unlikely(!can_use_fpu(s))) {
82
}
38
goto soft;
83
39
}
84
+int8_t bfloat16_to_int8_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
85
+ float_status *s)
41
+ goto soft;
86
+{
42
+ }
87
+ FloatParts64 p;
43
88
+
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
89
+ bfloat16_unpack_canonical(&p, a, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
90
+ return parts_float_to_sint(&p, rmode, scale, INT8_MIN, INT8_MAX, s);
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
91
+}
47
index XXXXXXX..XXXXXXX 100644
92
+
48
--- a/fpu/softfloat-parts.c.inc
93
int16_t bfloat16_to_int16_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
49
+++ b/fpu/softfloat-parts.c.inc
94
float_status *s)
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
95
{
51
goto return_normal;
96
@@ -XXX,XX +XXX,XX @@ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *s)
52
}
97
return floatx80_to_int64_scalbn(a, float_round_to_zero, 0, s);
53
if (c->cls == float_class_zero) {
98
}
54
- if (a->sign != c->sign) {
99
55
+ if (flags & float_muladd_suppress_add_product_zero) {
100
+int8_t bfloat16_to_int8(bfloat16 a, float_status *s)
56
+ a->sign = c->sign;
101
+{
57
+ } else if (a->sign != c->sign) {
102
+ return bfloat16_to_int8_scalbn(a, s->float_rounding_mode, 0, s);
58
goto return_sub_zero;
103
+}
59
}
104
+
60
goto return_zero;
105
int16_t bfloat16_to_int16(bfloat16 a, float_status *s)
106
{
107
return bfloat16_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
108
@@ -XXX,XX +XXX,XX @@ int64_t bfloat16_to_int64(bfloat16 a, float_status *s)
109
return bfloat16_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
110
}
111
112
+int8_t bfloat16_to_int8_round_to_zero(bfloat16 a, float_status *s)
113
+{
114
+ return bfloat16_to_int8_scalbn(a, float_round_to_zero, 0, s);
115
+}
116
+
117
int16_t bfloat16_to_int16_round_to_zero(bfloat16 a, float_status *s)
118
{
119
return bfloat16_to_int16_scalbn(a, float_round_to_zero, 0, s);
120
@@ -XXX,XX +XXX,XX @@ uint64_t float64_to_uint64_scalbn(float64 a, FloatRoundMode rmode, int scale,
121
return parts_float_to_uint(&p, rmode, scale, UINT64_MAX, s);
122
}
123
124
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode rmode,
125
+ int scale, float_status *s)
126
+{
127
+ FloatParts64 p;
128
+
129
+ bfloat16_unpack_canonical(&p, a, s);
130
+ return parts_float_to_uint(&p, rmode, scale, UINT8_MAX, s);
131
+}
132
+
133
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode rmode,
134
int scale, float_status *s)
135
{
136
@@ -XXX,XX +XXX,XX @@ Int128 float128_to_uint128_round_to_zero(float128 a, float_status *s)
137
return float128_to_uint128_scalbn(a, float_round_to_zero, 0, s);
138
}
139
140
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *s)
141
+{
142
+ return bfloat16_to_uint8_scalbn(a, s->float_rounding_mode, 0, s);
143
+}
144
+
145
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *s)
146
{
147
return bfloat16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
148
@@ -XXX,XX +XXX,XX @@ uint64_t bfloat16_to_uint64(bfloat16 a, float_status *s)
149
return bfloat16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
150
}
151
152
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *s)
153
+{
154
+ return bfloat16_to_uint8_scalbn(a, float_round_to_zero, 0, s);
155
+}
156
+
157
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *s)
158
{
159
return bfloat16_to_uint16_scalbn(a, float_round_to_zero, 0, s);
160
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16_scalbn(int16_t a, int scale, float_status *status)
161
return int64_to_bfloat16_scalbn(a, scale, status);
162
}
163
164
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int scale, float_status *status)
165
+{
166
+ return int64_to_bfloat16_scalbn(a, scale, status);
167
+}
168
+
169
bfloat16 int64_to_bfloat16(int64_t a, float_status *status)
170
{
171
return int64_to_bfloat16_scalbn(a, 0, status);
172
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status)
173
return int64_to_bfloat16_scalbn(a, 0, status);
174
}
175
176
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status)
177
+{
178
+ return int64_to_bfloat16_scalbn(a, 0, status);
179
+}
180
+
181
float128 int128_to_float128(Int128 a, float_status *status)
182
{
183
FloatParts128 p = { };
184
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int scale, float_status *status)
185
return uint64_to_bfloat16_scalbn(a, scale, status);
186
}
187
188
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int scale, float_status *status)
189
+{
190
+ return uint64_to_bfloat16_scalbn(a, scale, status);
191
+}
192
+
193
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status)
194
{
195
return uint64_to_bfloat16_scalbn(a, 0, status);
196
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)
197
return uint64_to_bfloat16_scalbn(a, 0, status);
198
}
199
200
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status)
201
+{
202
+ return uint64_to_bfloat16_scalbn(a, 0, status);
203
+}
204
+
205
float128 uint64_to_float128(uint64_t a, float_status *status)
206
{
207
FloatParts128 p;
208
--
61
--
209
2.34.1
62
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
2
3
3
Lower the following ops:
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
- shli_vec
6
- shrv_vec
7
- sarv_vec
8
9
Signed-off-by: Jiajie Chen <c@jia.je>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20230908022302.180442-14-c@jia.je>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
tcg/loongarch64/tcg-target.h | 2 +-
7
target/hexagon/fma_emu.h | 1 -
15
tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
8
target/hexagon/fma_emu.c | 8 --------
16
2 files changed, 22 insertions(+), 1 deletion(-)
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
17
11
18
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
19
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/loongarch64/tcg-target.h
14
--- a/target/hexagon/fma_emu.h
21
+++ b/tcg/loongarch64/tcg-target.h
15
+++ b/target/hexagon/fma_emu.h
22
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
23
#define TCG_TARGET_HAS_nor_vec 1
17
float32 infinite_float32(uint8_t sign);
24
#define TCG_TARGET_HAS_eqv_vec 0
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
25
#define TCG_TARGET_HAS_mul_vec 1
19
int scale, float_status *fp_status);
26
-#define TCG_TARGET_HAS_shi_vec 0
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
27
+#define TCG_TARGET_HAS_shi_vec 1
21
float64 internal_mpyhh(float64 a, float64 b,
28
#define TCG_TARGET_HAS_shs_vec 0
22
unsigned long long int accumulated,
29
#define TCG_TARGET_HAS_shv_vec 1
23
float_status *fp_status);
30
#define TCG_TARGET_HAS_roti_vec 0
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
31
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
32
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/loongarch64/tcg-target.c.inc
26
--- a/target/hexagon/fma_emu.c
34
+++ b/tcg/loongarch64/tcg-target.c.inc
27
+++ b/target/hexagon/fma_emu.c
35
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
36
static const LoongArchInsn sarv_vec_insn[4] = {
29
return accum_round_float32(result, fp_status);
37
OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
30
}
38
};
31
39
+ static const LoongArchInsn shli_vec_insn[4] = {
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
40
+ OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
33
-{
41
+ };
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
42
+ static const LoongArchInsn shri_vec_insn[4] = {
35
- return float32_mul(a, b, fp_status);
43
+ OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
36
- }
44
+ };
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
45
+ static const LoongArchInsn sari_vec_insn[4] = {
38
-}
46
+ OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
39
-
47
+ };
40
float64 internal_mpyhh(float64 a, float64 b,
48
41
unsigned long long int accumulated,
49
a0 = args[0];
42
float_status *fp_status)
50
a1 = args[1];
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
44
index XXXXXXX..XXXXXXX 100644
52
case INDEX_op_sarv_vec:
45
--- a/target/hexagon/op_helper.c
53
tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
46
+++ b/target/hexagon/op_helper.c
54
break;
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
55
+ case INDEX_op_shli_vec:
48
{
56
+ tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
49
float32 RdV;
57
+ break;
50
arch_fpop_start(env);
58
+ case INDEX_op_shri_vec:
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
59
+ tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
60
+ break;
53
arch_fpop_end(env);
61
+ case INDEX_op_sari_vec:
54
return RdV;
62
+ tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
55
}
63
+ break;
64
case INDEX_op_bitsel_vec:
65
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
66
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
67
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
68
69
case INDEX_op_not_vec:
70
case INDEX_op_neg_vec:
71
+ case INDEX_op_shli_vec:
72
+ case INDEX_op_shri_vec:
73
+ case INDEX_op_sari_vec:
74
return C_O1_I1(w, w);
75
76
case INDEX_op_bitsel_vec:
77
--
56
--
78
2.34.1
57
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
There are no special cases for this instruction.
2
2
3
Lower the following ops:
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
- shlv_vec
6
- shrv_vec
7
- sarv_vec
8
9
Signed-off-by: Jiajie Chen <c@jia.je>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20230908022302.180442-12-c@jia.je>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
5
---
14
tcg/loongarch64/tcg-target.h | 2 +-
6
target/hexagon/op_helper.c | 2 +-
15
tcg/loongarch64/tcg-target.c.inc | 24 ++++++++++++++++++++++++
7
1 file changed, 1 insertion(+), 1 deletion(-)
16
2 files changed, 25 insertions(+), 1 deletion(-)
17
8
18
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
19
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/loongarch64/tcg-target.h
11
--- a/target/hexagon/op_helper.c
21
+++ b/tcg/loongarch64/tcg-target.h
12
+++ b/target/hexagon/op_helper.c
22
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
23
#define TCG_TARGET_HAS_mul_vec 1
14
float32 RsV, float32 RtV)
24
#define TCG_TARGET_HAS_shi_vec 0
15
{
25
#define TCG_TARGET_HAS_shs_vec 0
16
arch_fpop_start(env);
26
-#define TCG_TARGET_HAS_shv_vec 0
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
27
+#define TCG_TARGET_HAS_shv_vec 1
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
28
#define TCG_TARGET_HAS_roti_vec 0
19
arch_fpop_end(env);
29
#define TCG_TARGET_HAS_rots_vec 0
20
return RxV;
30
#define TCG_TARGET_HAS_rotv_vec 0
21
}
31
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/loongarch64/tcg-target.c.inc
34
+++ b/tcg/loongarch64/tcg-target.c.inc
35
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
36
static const LoongArchInsn ussub_vec_insn[4] = {
37
OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
38
};
39
+ static const LoongArchInsn shlv_vec_insn[4] = {
40
+ OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
41
+ };
42
+ static const LoongArchInsn shrv_vec_insn[4] = {
43
+ OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
44
+ };
45
+ static const LoongArchInsn sarv_vec_insn[4] = {
46
+ OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
47
+ };
48
49
a0 = args[0];
50
a1 = args[1];
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
52
case INDEX_op_ussub_vec:
53
tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
54
break;
55
+ case INDEX_op_shlv_vec:
56
+ tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
57
+ break;
58
+ case INDEX_op_shrv_vec:
59
+ tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
60
+ break;
61
+ case INDEX_op_sarv_vec:
62
+ tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
63
+ break;
64
case INDEX_op_dupm_vec:
65
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
66
break;
67
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
68
case INDEX_op_usadd_vec:
69
case INDEX_op_sssub_vec:
70
case INDEX_op_ussub_vec:
71
+ case INDEX_op_shlv_vec:
72
+ case INDEX_op_shrv_vec:
73
+ case INDEX_op_sarv_vec:
74
return 1;
75
default:
76
return 0;
77
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
78
case INDEX_op_usadd_vec:
79
case INDEX_op_sssub_vec:
80
case INDEX_op_ussub_vec:
81
+ case INDEX_op_shlv_vec:
82
+ case INDEX_op_shrv_vec:
83
+ case INDEX_op_sarv_vec:
84
return C_O1_I2(w, w, w);
85
86
case INDEX_op_not_vec:
87
--
22
--
88
2.34.1
23
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
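(Illustrative sketch, not part of the patch, assuming QEMU's softfloat headers: both
forms below compute c - a*b with a single rounding, but the flag form leaves a NaN in
the first operand untouched instead of flipping its sign before propagation; under
Hexagon's default-NaN behaviour the two are indistinguishable, so the simpler flag
form is used:

    float_status st = { };
    float32 a = make_float32(0x40400000);   /* 3.0f */
    float32 b = make_float32(0x3f000000);   /* 0.5f */
    float32 c = make_float32(0x40000000);   /* 2.0f */

    /* Old style: negate the first input by hand. */
    float32 by_hand = float32_muladd(float32_chs(a), b, c, 0, &st);
    /* New style: let muladd negate the product internally. */
    float32 by_flag = float32_muladd(a, b, c,
                                     float_muladd_negate_product, &st);
    /* Both are 0.5f here. */
)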
2
4
3
Lower the following ops:
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
- ssadd_vec
6
- usadd_vec
7
- sssub_vec
8
- ussub_vec
9
10
Signed-off-by: Jiajie Chen <c@jia.je>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-Id: <20230908022302.180442-11-c@jia.je>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
7
---
15
tcg/loongarch64/tcg-target.h | 2 +-
8
target/hexagon/op_helper.c | 5 ++---
16
tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
9
1 file changed, 2 insertions(+), 3 deletions(-)
17
2 files changed, 33 insertions(+), 1 deletion(-)
18
10
19
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
20
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/loongarch64/tcg-target.h
13
--- a/target/hexagon/op_helper.c
22
+++ b/tcg/loongarch64/tcg-target.h
14
+++ b/target/hexagon/op_helper.c
23
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
24
#define TCG_TARGET_HAS_roti_vec 0
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
25
#define TCG_TARGET_HAS_rots_vec 0
17
float32 RsV, float32 RtV)
26
#define TCG_TARGET_HAS_rotv_vec 0
18
{
27
-#define TCG_TARGET_HAS_sat_vec 0
19
- float32 neg_RsV;
28
+#define TCG_TARGET_HAS_sat_vec 1
20
arch_fpop_start(env);
29
#define TCG_TARGET_HAS_minmax_vec 1
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
30
#define TCG_TARGET_HAS_bitsel_vec 0
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
31
#define TCG_TARGET_HAS_cmpsel_vec 0
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
24
+ &env->fp_status);
33
index XXXXXXX..XXXXXXX 100644
25
arch_fpop_end(env);
34
--- a/tcg/loongarch64/tcg-target.c.inc
26
return RxV;
35
+++ b/tcg/loongarch64/tcg-target.c.inc
27
}
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
37
static const LoongArchInsn umax_vec_insn[4] = {
38
OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
39
};
40
+ static const LoongArchInsn ssadd_vec_insn[4] = {
41
+ OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
42
+ };
43
+ static const LoongArchInsn usadd_vec_insn[4] = {
44
+ OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
45
+ };
46
+ static const LoongArchInsn sssub_vec_insn[4] = {
47
+ OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
48
+ };
49
+ static const LoongArchInsn ussub_vec_insn[4] = {
50
+ OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
51
+ };
52
53
a0 = args[0];
54
a1 = args[1];
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
56
case INDEX_op_umax_vec:
57
tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
58
break;
59
+ case INDEX_op_ssadd_vec:
60
+ tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
61
+ break;
62
+ case INDEX_op_usadd_vec:
63
+ tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
64
+ break;
65
+ case INDEX_op_sssub_vec:
66
+ tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
67
+ break;
68
+ case INDEX_op_ussub_vec:
69
+ tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
70
+ break;
71
case INDEX_op_dupm_vec:
72
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
73
break;
74
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
75
case INDEX_op_smax_vec:
76
case INDEX_op_umin_vec:
77
case INDEX_op_umax_vec:
78
+ case INDEX_op_ssadd_vec:
79
+ case INDEX_op_usadd_vec:
80
+ case INDEX_op_sssub_vec:
81
+ case INDEX_op_ussub_vec:
82
return 1;
83
default:
84
return 0;
85
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
86
case INDEX_op_smax_vec:
87
case INDEX_op_umin_vec:
88
case INDEX_op_umax_vec:
89
+ case INDEX_op_ssadd_vec:
90
+ case INDEX_op_usadd_vec:
91
+ case INDEX_op_sssub_vec:
92
+ case INDEX_op_ussub_vec:
93
return C_O1_I2(w, w, w);
94
95
case INDEX_op_not_vec:
96
--
28
--
97
2.34.1
29
2.43.0
1
From: Jiajie Chen <c@jia.je>
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
2
5
3
Lower the following ops:
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
- smin_vec
6
- smax_vec
7
- umin_vec
8
- umax_vec
9
10
Signed-off-by: Jiajie Chen <c@jia.je>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-Id: <20230908022302.180442-10-c@jia.je>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
8
---
15
tcg/loongarch64/tcg-target.h | 2 +-
9
target/hexagon/op_helper.c | 11 +++--------
16
tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
10
1 file changed, 3 insertions(+), 8 deletions(-)
17
2 files changed, 33 insertions(+), 1 deletion(-)
18
11
19
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
20
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/loongarch64/tcg-target.h
14
--- a/target/hexagon/op_helper.c
22
+++ b/tcg/loongarch64/tcg-target.h
15
+++ b/target/hexagon/op_helper.c
23
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
24
#define TCG_TARGET_HAS_rots_vec 0
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
25
#define TCG_TARGET_HAS_rotv_vec 0
18
float32 RsV, float32 RtV, float32 PuV)
26
#define TCG_TARGET_HAS_sat_vec 0
19
{
27
-#define TCG_TARGET_HAS_minmax_vec 0
20
- size4s_t tmp;
28
+#define TCG_TARGET_HAS_minmax_vec 1
21
arch_fpop_start(env);
29
#define TCG_TARGET_HAS_bitsel_vec 0
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
30
#define TCG_TARGET_HAS_cmpsel_vec 0
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
31
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
33
index XXXXXXX..XXXXXXX 100644
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
34
--- a/tcg/loongarch64/tcg-target.c.inc
27
- RxV = tmp;
35
+++ b/tcg/loongarch64/tcg-target.c.inc
28
- }
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
37
static const LoongArchInsn mul_vec_insn[4] = {
30
+ float_muladd_suppress_add_product_zero,
38
OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
31
+ &env->fp_status);
39
};
32
arch_fpop_end(env);
40
+ static const LoongArchInsn smin_vec_insn[4] = {
33
return RxV;
41
+ OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
34
}
42
+ };
43
+ static const LoongArchInsn umin_vec_insn[4] = {
44
+ OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
45
+ };
46
+ static const LoongArchInsn smax_vec_insn[4] = {
47
+ OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
48
+ };
49
+ static const LoongArchInsn umax_vec_insn[4] = {
50
+ OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
51
+ };
52
53
a0 = args[0];
54
a1 = args[1];
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
56
case INDEX_op_mul_vec:
57
tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
58
break;
59
+ case INDEX_op_smin_vec:
60
+ tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
61
+ break;
62
+ case INDEX_op_smax_vec:
63
+ tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
64
+ break;
65
+ case INDEX_op_umin_vec:
66
+ tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
67
+ break;
68
+ case INDEX_op_umax_vec:
69
+ tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
70
+ break;
71
case INDEX_op_dupm_vec:
72
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
73
break;
74
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
75
case INDEX_op_not_vec:
76
case INDEX_op_neg_vec:
77
case INDEX_op_mul_vec:
78
+ case INDEX_op_smin_vec:
79
+ case INDEX_op_smax_vec:
80
+ case INDEX_op_umin_vec:
81
+ case INDEX_op_umax_vec:
82
return 1;
83
default:
84
return 0;
85
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
86
case INDEX_op_xor_vec:
87
case INDEX_op_nor_vec:
88
case INDEX_op_mul_vec:
89
+ case INDEX_op_smin_vec:
90
+ case INDEX_op_smax_vec:
91
+ case INDEX_op_umin_vec:
92
+ case INDEX_op_umax_vec:
93
return C_O1_I2(w, w, w);
94
95
case INDEX_op_not_vec:
96
--
35
--
97
2.34.1
36
2.43.0
1
The prologue is entered via "call"; the epilogue, each tb,
1
There are multiple special cases for this instruction.
2
and each goto_tb continuation point are all reached via "jump".
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
3
8
4
As tcg_out_goto_long is only used by tcg_out_exit_tb, merge
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
the two functions. Change the indirect register used to
6
TCG_REG_TMP1, aka X17, so that the BTI condition created
7
is "jump" instead of "jump or call".
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
11
---
12
tcg/aarch64/tcg-target.c.inc | 54 ++++++++++++++++++++++++++----------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
13
1 file changed, 39 insertions(+), 15 deletions(-)
13
1 file changed, 26 insertions(+), 79 deletions(-)
14
14
15
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/aarch64/tcg-target.c.inc
17
--- a/target/hexagon/op_helper.c
18
+++ b/tcg/aarch64/tcg-target.c.inc
18
+++ b/target/hexagon/op_helper.c
19
@@ -XXX,XX +XXX,XX @@ typedef enum {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
20
DMB_ISH = 0xd50338bf,
20
return RxV;
21
DMB_LD = 0x00000100,
22
DMB_ST = 0x00000200,
23
+
24
+ BTI_C = 0xd503245f,
25
+ BTI_J = 0xd503249f,
26
+ BTI_JC = 0xd50324df,
27
} AArch64Insn;
28
29
static inline uint32_t tcg_in32(TCGContext *s)
30
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
31
| rn << 5 | (rd & 0x1f));
32
}
21
}
33
22
34
+static void tcg_out_bti(TCGContext *s, AArch64Insn insn)
23
-static bool is_zero_prod(float32 a, float32 b)
35
+{
36
+ /*
37
+ * While BTI insns are nops on hosts without FEAT_BTI,
38
+ * there is no point in emitting them in that case either.
39
+ */
40
+ if (cpuinfo & CPUINFO_BTI) {
41
+ tcg_out32(s, insn);
42
+ }
43
+}
44
+
45
/* Register to register move using ORR (shifted register with no shift). */
46
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
47
{
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
49
tcg_out_insn(s, 3206, B, offset);
50
}
51
52
-static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
53
-{
24
-{
54
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
25
- return ((float32_is_zero(a) && is_finite(b)) ||
55
- if (offset == sextract64(offset, 0, 26)) {
26
- (float32_is_zero(b) && is_finite(a)));
56
- tcg_out_insn(s, 3206, B, offset);
57
- } else {
58
- /* Choose X9 as a call-clobbered non-LR temporary. */
59
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
60
- tcg_out_insn(s, 3207, BR, TCG_REG_X9);
61
- }
62
-}
27
-}
63
-
28
-
64
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
30
-{
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
65
{
43
{
66
ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
67
@@ -XXX,XX +XXX,XX @@ static const tcg_insn_unit *tb_ret_addr;
45
return RxV;
68
46
}
69
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
47
48
-static bool is_inf_prod(int32_t a, int32_t b)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
50
+ float32 RsV, float32 RtV, int negate)
70
{
51
{
71
+ const tcg_insn_unit *target;
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
72
+ ptrdiff_t offset;
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
73
+
56
+
74
/* Reuse the zeroing that exists for goto_ptr. */
57
+ arch_fpop_start(env);
75
if (a0 == 0) {
58
+
76
- tcg_out_goto_long(s, tcg_code_gen_epilogue);
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
77
+ target = tcg_code_gen_epilogue;
60
+ RxV = float32_muladd(RsV, RtV, RxV,
78
} else {
61
+ negate | float_muladd_suppress_add_product_zero,
79
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
62
+ &env->fp_status);
80
- tcg_out_goto_long(s, tb_ret_addr);
63
+
81
+ target = tb_ret_addr;
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
72
+ }
82
+ }
73
+ }
83
+
74
+
84
+ offset = tcg_pcrel_diff(s, target) >> 2;
75
+ arch_fpop_end(env);
85
+ if (offset == sextract64(offset, 0, 26)) {
76
+ return RxV;
86
+ tcg_out_insn(s, 3206, B, offset);
87
+ } else {
88
+ /*
89
+ * Only x16/x17 generate BTI type Jump (2),
90
+ * other registers generate BTI type Jump|Call (3).
91
+ */
92
+ QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16);
93
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
94
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
95
}
96
}
77
}
97
78
98
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
99
tcg_out32(s, I3206_B);
80
float32 RsV, float32 RtV)
100
tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
81
{
101
set_jmp_reset_offset(s, which);
82
- bool infinp;
102
+ tcg_out_bti(s, BTI_J);
83
- bool infminusinf;
84
- float32 tmp;
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
103
}
111
}
104
112
105
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
106
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
114
float32 RsV, float32 RtV)
107
{
115
{
108
TCGReg r;
116
- bool infinp;
109
117
- bool infminusinf;
110
+ tcg_out_bti(s, BTI_C);
118
- float32 tmp;
111
+
119
-
112
/* Push (FP, LR) and allocate space for all saved registers. */
120
- arch_fpop_start(env);
113
tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
114
TCG_REG_SP, -PUSH_SIZE, 1, 1);
122
- infminusinf = float32_is_infinity(RxV) &&
115
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
123
- is_inf_prod(RsV, RtV) &&
116
* and fall through to the rest of the epilogue.
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
117
*/
125
- infinp = float32_is_infinity(RxV) ||
118
tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
126
- float32_is_infinity(RtV) ||
119
+ tcg_out_bti(s, BTI_J);
127
- float32_is_infinity(RsV);
120
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
121
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
122
/* TB epilogue */
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
123
tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
124
+ tcg_out_bti(s, BTI_J);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
125
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
126
/* Remove TCG locals stack space. */
134
- RxV = tmp;
127
tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
135
- }
128
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
136
- set_float_exception_flags(0, &env->fp_status);
129
137
- if (float32_is_infinity(RxV) && !infinp) {
130
static void tcg_out_tb_start(TCGContext *s)
138
- RxV = RxV - 1;
131
{
139
- }
132
- /* nothing to do */
140
- if (infminusinf) {
133
+ tcg_out_bti(s, BTI_J);
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
134
}
146
}
135
147
136
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
137
--
149
--
138
2.34.1
150
2.43.0
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-9-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
2 files changed, 9 insertions(+), 1 deletion(-)

The function is now unused.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.h | 2 -
target/hexagon/fma_emu.c | 171 ---------------------------------------
2 files changed, 173 deletions(-)
11
9
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
12
--- a/target/hexagon/fma_emu.h
15
+++ b/tcg/loongarch64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
17
#define TCG_TARGET_HAS_nand_vec 0
15
}
18
#define TCG_TARGET_HAS_nor_vec 1
16
int32_t float32_getexp(float32 f32);
19
#define TCG_TARGET_HAS_eqv_vec 0
17
float32 infinite_float32(uint8_t sign);
20
-#define TCG_TARGET_HAS_mul_vec 0
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
21
+#define TCG_TARGET_HAS_mul_vec 1
19
- int scale, float_status *fp_status);
22
#define TCG_TARGET_HAS_shi_vec 0
20
float64 internal_mpyhh(float64 a, float64 b,
23
#define TCG_TARGET_HAS_shs_vec 0
21
unsigned long long int accumulated,
24
#define TCG_TARGET_HAS_shv_vec 0
22
float_status *fp_status);
25
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
26
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/loongarch64/tcg-target.c.inc
25
--- a/target/hexagon/fma_emu.c
28
+++ b/tcg/loongarch64/tcg-target.c.inc
26
+++ b/target/hexagon/fma_emu.c
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
30
static const LoongArchInsn neg_vec_insn[4] = {
28
return -1;
31
OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
29
}
32
};
30
33
+ static const LoongArchInsn mul_vec_insn[4] = {
31
-static uint64_t float32_getmant(float32 f32)
34
+ OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
32
-{
35
+ };
33
- Float a = { .i = f32 };
36
34
- if (float32_is_normal(f32)) {
37
a0 = args[0];
35
- return a.mant | 1ULL << 23;
38
a1 = args[1];
36
- }
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
37
- if (float32_is_zero(f32)) {
40
case INDEX_op_neg_vec:
38
- return 0;
41
tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
39
- }
42
break;
40
- if (float32_is_denormal(f32)) {
43
+ case INDEX_op_mul_vec:
41
- return a.mant;
44
+ tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
42
- }
45
+ break;
43
- return ~0ULL;
46
case INDEX_op_dupm_vec:
44
-}
47
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
45
-
48
break;
46
int32_t float32_getexp(float32 f32)
49
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
47
{
50
case INDEX_op_nor_vec:
48
Float a = { .i = f32 };
51
case INDEX_op_not_vec:
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
52
case INDEX_op_neg_vec:
50
}
53
+ case INDEX_op_mul_vec:
51
54
return 1;
52
/* Return a maximum finite value with the requested sign */
55
default:
53
-static float32 maxfinite_float32(uint8_t sign)
56
return 0;
54
-{
57
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
55
- if (sign) {
58
case INDEX_op_orc_vec:
56
- return make_float32(SF_MINUS_MAXF);
59
case INDEX_op_xor_vec:
57
- } else {
60
case INDEX_op_nor_vec:
58
- return make_float32(SF_MAXF);
61
+ case INDEX_op_mul_vec:
59
- }
62
return C_O1_I2(w, w, w);
60
-}
63
61
-
64
case INDEX_op_not_vec:
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
65
--
219
--
66
2.34.1
220
2.43.0
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

When memory region is ram, the lower TARGET_PAGE_BITS is not the
physical section number. Instead, its value is always 0.

Add comment and assert to make it clear.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230901060118.379-1-zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-defs.h | 12 ++++++------
accel/tcg/cputlb.c | 11 +++++++----
2 files changed, 13 insertions(+), 10 deletions(-)

This massive macro is now only used once.
Expand it for use only by float64.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
1 file changed, 127 insertions(+), 128 deletions(-)
16
9
17
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-defs.h
12
--- a/target/hexagon/fma_emu.c
20
+++ b/include/exec/cpu-defs.h
13
+++ b/target/hexagon/fma_emu.c
21
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
22
typedef struct CPUTLBEntryFull {
15
}
23
/*
16
24
* @xlat_section contains:
17
/* Return a maximum finite value with the requested sign */
25
- * - in the lower TARGET_PAGE_BITS, a physical section number
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
26
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
27
- * must be added to the virtual address to obtain:
20
-{ \
28
- * + the ram_addr_t of the target RAM (if the physical section
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
29
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
30
- * + the offset within the target MemoryRegion (otherwise)
23
- /* result zero */ \
31
+ * - For ram, an offset which must be added to the virtual address
24
- switch (fp_status->float_rounding_mode) { \
32
+ * to obtain the ram_addr_t of the target RAM
25
- case float_round_down: \
33
+ * - For other memory regions,
26
- return zero_##SUFFIX(1); \
34
+ * + in the lower TARGET_PAGE_BITS, the physical section number
27
- default: \
35
+ * + with the TARGET_PAGE_BITS masked off, the offset within
28
- return zero_##SUFFIX(0); \
36
+ * the target MemoryRegion
29
- } \
37
*/
30
- } \
38
hwaddr xlat_section;
31
- /* Normalize right */ \
39
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
40
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
41
index XXXXXXX..XXXXXXX 100644
34
- /* So we need to normalize right while the high word is non-zero and \
42
--- a/accel/tcg/cputlb.c
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
43
+++ b/accel/tcg/cputlb.c
36
- while ((int128_gethi(a.mant) != 0) || \
44
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
45
write_flags = read_flags;
38
- a = accum_norm_right(a, 1); \
46
if (is_ram) {
39
- } \
47
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
40
- /* \
48
+ assert(!(iotlb & ~TARGET_PAGE_MASK));
41
- * OK, now normalize left \
49
/*
42
- * We want to normalize left until we have a leading one in bit 24 \
50
* Computing is_clean is expensive; avoid all that unless
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
51
* the page is actually writable.
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
52
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
45
- * should be 0 \
53
46
- */ \
54
/* refill the tlb */
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
55
/*
48
- a = accum_norm_left(a); \
56
- * At this point iotlb contains a physical section number in the lower
49
- } \
57
- * TARGET_PAGE_BITS, and either
50
- /* \
58
- * + the ram_addr_t of the page base of the target RAM (RAM)
51
- * OK, now we might need to denormalize because of potential underflow. \
59
- * + the offset within section->mr of the page base (I/O, ROMD)
52
- * We need to do this before rounding, and rounding might make us normal \
60
+ * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
53
- * again \
61
+ * aligned ram_addr_t of the page base of the target RAM.
54
- */ \
62
+ * Otherwise, iotlb contains
55
- while (a.exp <= 0) { \
63
+ * - a physical section number in the lower TARGET_PAGE_BITS
56
- a = accum_norm_right(a, 1 - a.exp); \
64
+ * - the offset within section->mr of the page base (I/O, ROMD) with the
57
- /* \
65
+ * TARGET_PAGE_BITS masked off.
58
- * Do we have underflow? \
66
* We subtract addr_page (which is page aligned and thus won't
59
- * That's when we get an inexact answer because we ran out of bits \
67
* disturb the low bits) to give an offset which can be added to the
60
- * in a denormal. \
68
* (non-page-aligned) vaddr of the eventual memory access to get
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
69
--
278
--
70
2.34.1
279
2.43.0
From: Akihiko Odaki <akihiko.odaki@daynix.com>

PA-RISC host support is already removed with commit
b1cef6d02f ("Drop remaining bits of ia64 host support").

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
softmmu/async-teardown.c | 3 ---
1 file changed, 3 deletions(-)

This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)
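For illustration only (not part of either patch above): once the host-endian
bitfield union is gone, the float32 fields can be read portably from the raw
bit pattern with extract32() from qemu/bitops.h. The helper names below are
made up for the example.

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "fpu/softfloat.h"

    /* Hypothetical helpers in the style of float32_getexp_raw(). */
    static inline uint32_t sf_getexp_raw(float32 f32)
    {
        return extract32(float32_val(f32), 23, 8);   /* biased exponent */
    }

    static inline uint32_t sf_getmant_raw(float32 f32)
    {
        return extract32(float32_val(f32), 0, 23);   /* fraction field */
    }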
12
9
13
diff --git a/softmmu/async-teardown.c b/softmmu/async-teardown.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/softmmu/async-teardown.c
12
--- a/target/hexagon/fma_emu.c
16
+++ b/softmmu/async-teardown.c
13
+++ b/target/hexagon/fma_emu.c
17
@@ -XXX,XX +XXX,XX @@ static void *new_stack_for_clone(void)
14
@@ -XXX,XX +XXX,XX @@ typedef union {
18
15
};
19
/* Allocate a new stack and get a pointer to its top. */
16
} Double;
20
stack_ptr = qemu_alloc_stack(&stack_size);
17
21
-#if !defined(HOST_HPPA)
18
-typedef union {
22
- /* The top is at the end of the area, except on HPPA. */
19
- float f;
23
stack_ptr += stack_size;
20
- uint32_t i;
24
-#endif
21
- struct {
25
22
- uint32_t mant:23;
26
return stack_ptr;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
27
}
46
}
28
--
47
--
29
2.34.1
48
2.43.0
Split out int_ld_mmio_beN, to be used by both do_ld_mmio_beN
and do_ld16_mmio_beN. Move the locks down into the two
functions, since each one now covers all accesses to one page.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 91 ++++++++++++++++++++++++++++++----------------
1 file changed, 59 insertions(+), 32 deletions(-)

This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
1 file changed, 16 insertions(+), 30 deletions(-)
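For illustration only (not part of either patch above): the float64 fields can
be handled the same way on the raw 64-bit pattern with extract64()/deposit64()
from qemu/bitops.h, instead of a host-layout bitfield union. The helper names
are made up for the example; df_pack() assumes mant already fits in the low
52 bits.

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "fpu/softfloat.h"

    /* Hypothetical helpers: take apart / reassemble an IEEE double. */
    static inline uint64_t df_getmant_raw(float64 f64)
    {
        return extract64(float64_val(f64), 0, 52);    /* fraction field */
    }

    static inline uint32_t df_getexp_raw(float64 f64)
    {
        return extract64(float64_val(f64), 52, 11);   /* biased exponent */
    }

    static inline float64 df_pack(uint64_t mant, uint32_t exp, bool sign)
    {
        uint64_t bits = deposit64(mant, 52, 11, exp);
        return make_float64(deposit64(bits, 63, 1, sign));
    }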
9
9
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
12
--- a/target/hexagon/fma_emu.c
13
+++ b/accel/tcg/cputlb.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
14
@@ -XXX,XX +XXX,XX @@
15
* Load @size bytes from @addr, which is memory-mapped i/o.
15
16
* The bytes are concatenated in big-endian order with @ret_be.
16
#define WAY_BIG_EXP 4096
17
*/
17
18
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
18
-typedef union {
19
- uint64_t ret_be, vaddr addr, int size,
19
- double f;
20
- int mmu_idx, MMUAccessType type, uintptr_t ra)
20
- uint64_t i;
21
+static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
21
- struct {
22
+ uint64_t ret_be, vaddr addr, int size,
22
- uint64_t mant:52;
23
+ int mmu_idx, MMUAccessType type, uintptr_t ra,
23
- uint64_t exp:11;
24
+ MemoryRegion *mr, hwaddr mr_offset)
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
25
{
29
{
26
- MemoryRegionSection *section;
30
- Double a = { .i = f64 };
27
- hwaddr mr_offset;
31
+ uint64_t mant = extract64(f64, 0, 52);
28
- MemoryRegion *mr;
32
if (float64_is_normal(f64)) {
29
- MemTxAttrs attrs;
33
- return a.mant | 1ULL << 52;
30
-
34
+ return mant | 1ULL << 52;
31
- tcg_debug_assert(size > 0 && size <= 8);
35
}
32
-
36
if (float64_is_zero(f64)) {
33
- attrs = full->attrs;
37
return 0;
34
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
38
}
35
- mr = section->mr;
39
if (float64_is_denormal(f64)) {
36
-
40
- return a.mant;
37
do {
41
+ return mant;
38
MemOp this_mop;
42
}
39
unsigned this_size;
43
return ~0ULL;
40
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
41
this_size = 1 << this_mop;
42
this_mop |= MO_BE;
43
44
- r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
45
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
46
+ this_mop, full->attrs);
47
if (unlikely(r != MEMTX_OK)) {
48
io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
49
}
50
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
51
return ret_be;
52
}
44
}
53
45
54
+static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
46
int32_t float64_getexp(float64 f64)
55
+ uint64_t ret_be, vaddr addr, int size,
47
{
56
+ int mmu_idx, MMUAccessType type, uintptr_t ra)
48
- Double a = { .i = f64 };
57
+{
49
+ int exp = extract64(f64, 52, 11);
58
+ MemoryRegionSection *section;
50
if (float64_is_normal(f64)) {
59
+ MemoryRegion *mr;
51
- return a.exp;
60
+ hwaddr mr_offset;
52
+ return exp;
61
+ MemTxAttrs attrs;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
62
+ uint64_t ret;
64
+ uint64_t ret;
63
+
65
+
64
+ tcg_debug_assert(size > 0 && size <= 8);
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
65
+
67
&& ((a.guard | a.round | a.sticky) == 0)) {
66
+ attrs = full->attrs;
68
/* result zero */
67
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
68
+ mr = section->mr;
70
}
69
+
71
}
70
+ qemu_mutex_lock_iothread();
72
/* Underflow? */
71
+ ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
72
+ type, ra, mr, mr_offset);
74
+ ret = int128_getlo(a.mant);
73
+ qemu_mutex_unlock_iothread();
75
+ if (ret & (1ULL << DF_MANTBITS)) {
74
+
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
75
+ return ret;
96
+ return ret;
76
+}
97
}
77
+
98
78
+static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
99
float64 internal_mpyhh(float64 a, float64 b,
79
+ uint64_t ret_be, vaddr addr, int size,
80
+ int mmu_idx, uintptr_t ra)
81
+{
82
+ MemoryRegionSection *section;
83
+ MemoryRegion *mr;
84
+ hwaddr mr_offset;
85
+ MemTxAttrs attrs;
86
+ uint64_t a, b;
87
+
88
+ tcg_debug_assert(size > 8 && size <= 16);
89
+
90
+ attrs = full->attrs;
91
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
92
+ mr = section->mr;
93
+
94
+ qemu_mutex_lock_iothread();
95
+ a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
96
+ MMU_DATA_LOAD, ra, mr, mr_offset);
97
+ b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
98
+ MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
99
+ qemu_mutex_unlock_iothread();
100
+
101
+ return int128_make128(b, a);
102
+}
103
+
104
/**
105
* do_ld_bytes_beN
106
* @p: translation parameters
107
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
108
unsigned tmp, half_size;
109
110
if (unlikely(p->flags & TLB_MMIO)) {
111
- QEMU_IOTHREAD_LOCK_GUARD();
112
return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
113
mmu_idx, type, ra);
114
}
115
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
116
MemOp atom;
117
118
if (unlikely(p->flags & TLB_MMIO)) {
119
- QEMU_IOTHREAD_LOCK_GUARD();
120
- a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
121
- mmu_idx, MMU_DATA_LOAD, ra);
122
- b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
123
- mmu_idx, MMU_DATA_LOAD, ra);
124
- return int128_make128(b, a);
125
+ return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
126
}
127
128
/*
129
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
130
MMUAccessType type, uintptr_t ra)
131
{
132
if (unlikely(p->flags & TLB_MMIO)) {
133
- QEMU_IOTHREAD_LOCK_GUARD();
134
return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
135
} else {
136
return *(uint8_t *)p->haddr;
137
@@ -XXX,XX +XXX,XX @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
138
uint16_t ret;
139
140
if (unlikely(p->flags & TLB_MMIO)) {
141
- QEMU_IOTHREAD_LOCK_GUARD();
142
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
143
if ((memop & MO_BSWAP) == MO_LE) {
144
ret = bswap16(ret);
145
@@ -XXX,XX +XXX,XX @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
146
uint32_t ret;
147
148
if (unlikely(p->flags & TLB_MMIO)) {
149
- QEMU_IOTHREAD_LOCK_GUARD();
150
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
151
if ((memop & MO_BSWAP) == MO_LE) {
152
ret = bswap32(ret);
153
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
154
uint64_t ret;
155
156
if (unlikely(p->flags & TLB_MMIO)) {
157
- QEMU_IOTHREAD_LOCK_GUARD();
158
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
159
if ((memop & MO_BSWAP) == MO_LE) {
160
ret = bswap64(ret);
161
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
162
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
163
if (likely(!crosspage)) {
164
if (unlikely(l.page[0].flags & TLB_MMIO)) {
165
- QEMU_IOTHREAD_LOCK_GUARD();
166
- a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
167
- l.mmu_idx, MMU_DATA_LOAD, ra);
168
- b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
169
- l.mmu_idx, MMU_DATA_LOAD, ra);
170
- ret = int128_make128(b, a);
171
+ ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
172
+ l.mmu_idx, ra);
173
if ((l.memop & MO_BSWAP) == MO_LE) {
174
ret = bswap128(ret);
175
}
176
--
100
--
177
2.34.1
101
2.43.0
Avoid multiple calls to io_prepare for unaligned accesses.
One call to do_ld_mmio_beN will never cross pages.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 84 +++++++++++++++++-----------------------------
1 file changed, 30 insertions(+), 54 deletions(-)

No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 32 +++-----------------------------
1 file changed, 3 insertions(+), 29 deletions(-)
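For illustration only (not part of either patch above): the open-coded
partial-product arithmetic reduces to a single mulu64() call from
qemu/host-utils.h, which is what the fma_emu hunk below does.

    #include "qemu/osdep.h"
    #include "qemu/host-utils.h"
    #include "qemu/int128.h"

    /* 64x64 -> 128-bit unsigned multiply. */
    static Int128 mul_6464(uint64_t a, uint64_t b)
    {
        uint64_t lo, hi;

        mulu64(&lo, &hi, a, b);         /* hi:lo = a * b */
        return int128_make128(lo, hi);  /* low half first, then high */
    }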
8
8
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/cputlb.c
11
--- a/target/hexagon/fma_emu.c
12
+++ b/accel/tcg/cputlb.c
12
+++ b/target/hexagon/fma_emu.c
13
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
14
}
14
return -1;
15
}
15
}
16
16
17
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
17
-static uint32_t int128_getw0(Int128 x)
18
- int mmu_idx, vaddr addr, uintptr_t retaddr,
19
- MMUAccessType access_type, MemOp op)
20
-{
18
-{
21
- MemoryRegionSection *section;
19
- return int128_getlo(x);
22
- hwaddr mr_offset;
20
-}
23
- MemoryRegion *mr;
24
- MemTxResult r;
25
- uint64_t val;
26
-
21
-
27
- section = io_prepare(&mr_offset, env, full->xlat_section,
22
-static uint32_t int128_getw1(Int128 x)
28
- full->attrs, addr, retaddr);
23
-{
29
- mr = section->mr;
24
- return int128_getlo(x) >> 32;
25
-}
30
-
26
-
31
- {
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
32
- QEMU_IOTHREAD_LOCK_GUARD();
28
{
33
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
29
- Int128 a, b;
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
34
- }
47
- }
35
-
48
-
36
- if (r != MEMTX_OK) {
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
37
- io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
50
+ mulu64(&l, &h, ai, bi);
38
- r, retaddr);
51
+ return int128_make128(l, h);
39
- }
40
- return val;
41
-}
42
-
43
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
44
int mmu_idx, uint64_t val, vaddr addr,
45
uintptr_t retaddr, MemOp op)
46
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
47
uint64_t ret_be, vaddr addr, int size,
48
int mmu_idx, MMUAccessType type, uintptr_t ra)
49
{
50
- uint64_t t;
51
+ MemoryRegionSection *section;
52
+ hwaddr mr_offset;
53
+ MemoryRegion *mr;
54
+ MemTxAttrs attrs;
55
56
tcg_debug_assert(size > 0 && size <= 8);
57
+
58
+ attrs = full->attrs;
59
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
60
+ mr = section->mr;
61
+
62
do {
63
+ MemOp this_mop;
64
+ unsigned this_size;
65
+ uint64_t val;
66
+ MemTxResult r;
67
+
68
/* Read aligned pieces up to 8 bytes. */
69
- switch ((size | (int)addr) & 7) {
70
- case 1:
71
- case 3:
72
- case 5:
73
- case 7:
74
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
75
- ret_be = (ret_be << 8) | t;
76
- size -= 1;
77
- addr += 1;
78
- break;
79
- case 2:
80
- case 6:
81
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
82
- ret_be = (ret_be << 16) | t;
83
- size -= 2;
84
- addr += 2;
85
- break;
86
- case 4:
87
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
88
- ret_be = (ret_be << 32) | t;
89
- size -= 4;
90
- addr += 4;
91
- break;
92
- case 0:
93
- return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
94
- default:
95
- qemu_build_not_reached();
96
+ this_mop = ctz32(size | (int)addr | 8);
97
+ this_size = 1 << this_mop;
98
+ this_mop |= MO_BE;
99
+
100
+ r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
101
+ if (unlikely(r != MEMTX_OK)) {
102
+ io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
103
}
104
+ if (this_size == 8) {
105
+ return val;
106
+ }
107
+
108
+ ret_be = (ret_be << (this_size * 8)) | val;
109
+ addr += this_size;
110
+ mr_offset += this_size;
111
+ size -= this_size;
112
} while (size);
113
+
114
return ret_be;
115
}
52
}
116
53
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
117
--
55
--
118
2.34.1
56
2.43.0
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-8-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
2 files changed, 9 insertions(+), 1 deletion(-)

Initialize x with accumulated via direct assignment,
rather than multiplying by 1.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
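For illustration only (not part of either patch above): seeding the 128-bit
accumulator directly is both clearer and cheaper than multiplying by 1. The
helper name is made up for the example.

    #include "qemu/osdep.h"
    #include "qemu/int128.h"

    /* Hypothetical helper: start an Int128 accumulator from a uint64_t. */
    static inline Int128 accum_seed(uint64_t accumulated)
    {
        return int128_make64(accumulated);   /* zero-extends into the high half */
    }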
11
9
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
15
+++ b/tcg/loongarch64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
16
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
17
#define TCG_TARGET_HAS_v256 0
15
float64_is_infinity(b)) {
18
16
return float64_mul(a, b, fp_status);
19
#define TCG_TARGET_HAS_not_vec 1
17
}
20
-#define TCG_TARGET_HAS_neg_vec 0
18
- x.mant = int128_mul_6464(accumulated, 1);
21
+#define TCG_TARGET_HAS_neg_vec 1
19
+ x.mant = int128_make64(accumulated);
22
#define TCG_TARGET_HAS_abs_vec 0
20
x.sticky = sticky;
23
#define TCG_TARGET_HAS_andc_vec 1
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
24
#define TCG_TARGET_HAS_orc_vec 1
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
25
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/loongarch64/tcg-target.c.inc
28
+++ b/tcg/loongarch64/tcg-target.c.inc
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
30
[TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
31
};
32
LoongArchInsn insn;
33
+ static const LoongArchInsn neg_vec_insn[4] = {
34
+ OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
35
+ };
36
37
a0 = args[0];
38
a1 = args[1];
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
40
case INDEX_op_sub_vec:
41
tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
42
break;
43
+ case INDEX_op_neg_vec:
44
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
45
+ break;
46
case INDEX_op_dupm_vec:
47
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
48
break;
49
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
50
case INDEX_op_xor_vec:
51
case INDEX_op_nor_vec:
52
case INDEX_op_not_vec:
53
+ case INDEX_op_neg_vec:
54
return 1;
55
default:
56
return 0;
57
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
58
return C_O1_I2(w, w, w);
59
60
case INDEX_op_not_vec:
61
+ case INDEX_op_neg_vec:
62
return C_O1_I1(w, w);
63
64
default:
65
--
23
--
66
2.34.1
24
2.43.0
Since the introduction of CPUTLBEntryFull, we can recover
the full cpu address space physical address without having
to examine the MemoryRegionSection.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)

Convert all targets simultaneously, as the gen_intermediate_code
function disappears from the target. While there are possible
workarounds, they're larger than simply performing the conversion.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 14 --------------
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
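For illustration only (not part of this patch): with the hook moved into
TCGCPUOps, each target ends up registering its translator entry point roughly
as sketched below; the foo_* names are made up for the example.

    #include "qemu/osdep.h"
    #include "hw/core/tcg-cpu-ops.h"

    /* Hypothetical target: its translator entry point... */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc);

    /* ...is wired up through TCGCPUOps instead of a global
     * gen_intermediate_code(). */
    static const TCGCPUOps foo_tcg_ops = {
        .translate_code = foo_translate_code,
        /* other hooks unchanged */
    };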
10
71
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
12
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
74
--- a/include/exec/translator.h
14
+++ b/accel/tcg/cputlb.c
75
+++ b/include/exec/translator.h
15
@@ -XXX,XX +XXX,XX @@ io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
76
@@ -XXX,XX +XXX,XX @@
16
77
#include "qemu/bswap.h"
17
static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
78
#include "exec/vaddr.h"
18
unsigned size, MMUAccessType access_type, int mmu_idx,
79
19
- MemTxResult response, uintptr_t retaddr,
80
-/**
20
- MemoryRegionSection *section, hwaddr mr_offset)
81
- * gen_intermediate_code
21
+ MemTxResult response, uintptr_t retaddr)
82
- * @cpu: cpu context
22
{
83
- * @tb: translation block
23
- hwaddr physaddr = (mr_offset +
84
- * @max_insns: max number of instructions to translate
24
- section->offset_within_address_space -
85
- * @pc: guest virtual program counter address
25
- section->offset_within_region);
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
26
-
93
-
27
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
94
/**
28
cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
95
* DisasJumpType:
29
mmu_idx, full->attrs, response, retaddr);
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
30
}
152
}
31
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
153
32
154
void avr_cpu_tcg_init(void);
33
if (r != MEMTX_OK) {
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
34
io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
156
+ int *max_insns, vaddr pc, void *host_pc);
35
- r, retaddr, section, mr_offset);
157
36
+ r, retaddr);
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}

void mb_tcg_init(void);
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU

diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@
#include "cpu.h"

void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);

#ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;

/*****************************************************************************/
void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);

#include "exec/cpu-all.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

/* translate.c */
void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void s390x_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data);
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
uintptr_t retaddr);

void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

#if !defined(CONFIG_USER_ONLY)
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

/* translate.c */
void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

/* fop_helper.c */
target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)

void cpu_state_reset(CPUTriCoreState *s);
void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,

void xtensa_collect_sr_names(const XtensaConfig *config);
void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
void xtensa_breakpoint_handler(CPUState *cs);
void xtensa_register_core(XtensaConfigList *node);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)

if (!tcg_target_initialized) {
/* Check mandatory TCGCPUOps handlers */
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
#ifndef CONFIG_USER_ONLY
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_halt);
+ assert(tcg_ops->cpu_exec_interrupt);
#endif /* !CONFIG_USER_ONLY */
- cpu->cc->tcg_ops->initialize();
+ assert(tcg_ops->translate_code);
+ tcg_ops->initialize();
tcg_target_initialized = true;
}

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,

tcg_func_start(tcg_ctx);

- tcg_ctx->cpu = env_cpu(env);
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+ CPUState *cs = env_cpu(env);
+ tcg_ctx->cpu = cs;
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/*
* Overflow of code_gen_buffer, or the current slice of it.
*
- * TODO: We don't need to re-do gen_intermediate_code, nor
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
* should we re-do the tcg optimization currently hidden
* inside tcg_gen_code. All that should be required is to
* flush the TBs, allocate a new TB, re-initialize it per
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

static const TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
+ .translate_code = alpha_translate_code,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
.tb_stop = alpha_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#ifdef CONFIG_TCG
static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)

static const TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
.tb_stop = arm_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {

static const TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)

static const TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
};
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {

static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
#endif
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)

static const TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)

static const TCGCPUOps loongarch_tcg_ops = {
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

static const TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
+ .translate_code = m68k_translate_code,
.restore_state_to_opc = m68k_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
.tb_stop = m68k_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0