The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
   17 | #include <inttypes.h>
      |          ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0

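A minimal sketch of the pattern used above, for illustration only (it is
not part of the patch, and the names are hypothetical): cast to a type
whose printf length modifier is fixed, instead of relying on the PRI*
macros from inttypes.h.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t count = 42;
        int buf[8];

        /* %lu/%lx need no PRI* macros; unsigned long is >= 32 bits. */
        printf("count: %lu\n", (unsigned long)count);
        /* Assumes pointers fit in unsigned long, as they do on the
           hosts this bare-metal test targets. */
        printf("addr:  0x%lx\n", (unsigned long)(uintptr_t)&buf[0]);
        return 0;
    }
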
plugins: optimize cpu_index code generation

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0

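For illustration (not part of the patch; the offset and temp name are
hypothetical), the generated-op difference looks roughly like this:

    /* CF_PARALLEL: cpu_index must be loaded from CPUState, so the
       scoreboard address arithmetic consuming it cannot fold. */
    ld_i32   tmp0, env, $0xfff0

    /* single vcpu: a constant feeds the scoreboard address
       arithmetic, which the optimizer folds to an absolute address. */
    movi_i32 tmp0, $0x0
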
tcg/optimize: Split out finish_bb, finish_ebb

Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0

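For illustration (not part of the patch), the two scopes differ at a
conditional branch versus a label:

    brcond_i32 t0, $0x0, eq, $L1   <- ends a basic block: finish_bb
                                      resets only prev_mb, because the
                                      fall-through stays inside the
                                      extended basic block and may keep
                                      known-constant temp data
    ...
    set_label $L1                  <- ends the extended basic block:
                                      other control paths reach here, so
                                      finish_ebb must also reset temp
                                      data and mem-copy tracking
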
tcg/optimize: Split out fold_affected_mask

There are only a few logical operations which can compute
an "affected" mask.  Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
--
2.43.0

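A worked example for the affected mask (illustrative): consider
"and_i32 t0, t1, $0xff" where t1 is already known to have
z_mask == 0x0f. The affected mask is

    a_mask = z1 & ~z2 = 0x0f & ~0xff = 0

so no bit of the result can differ from t1, and fold_affected_mask()
converts the operation into a copy of t1.
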
tcg/optimize: Copy mask writeback to fold_masks

Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
--
2.43.0

tcg/optimize: Split out fold_masks_zs

Add a routine to which masks can be passed directly, rather than
storing them into OptContext.  To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
--
2.43.0

tcg/optimize: Augment s_mask from z_mask in fold_masks_zs

Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
         int width = 8 * memop_size(mop);
 
         if (width < 64) {
-            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-            if (!(mop & MO_SIGN)) {
+            if (mop & MO_SIGN) {
+                ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+            } else {
                 ctx->z_mask = MAKE_64BIT_MASK(0, width);
-                ctx->s_mask <<= 1;
             }
         }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
     }
--
2.43.0

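A worked example (illustrative): if an operation produces
z_mask == 0xff, the top 56 bits are known zero, so the value fits in
9 signed bits. Even when a folder passes s_mask == 0, the output is
now recorded with

    s_mask |= smask_from_zmask(0xff)
            = ~(~0ull >> 55)
            = 0xfffffffffffffe00

i.e. 55 redundant copies of the (zero) sign bit.
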
tcg/optimize: Change representation of s_mask

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
led to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, and
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so.  Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right.  Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
 
         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-        ctx->s_mask = smask_from_smask(s_mask);
 
         return fold_masks(ctx, op);
     }
--
2.43.0

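An illustrative comparison of the two formats (not from the commit
message): for the value 0xffffffff80000000, clrsb64() returns 32, so

    old: ~(~0ull >> 32)  == 0xffffffff00000000   /* repetitions only */
    new: INT64_MIN >> 32 == 0xffffffff80000000   /* sign bit included */

Because the new mask covers the sign bit itself, an arithmetic right
shift of the mask tracks exactly what the same shift does to the value,
with no off-by-one correction needed.
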
tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
     remove_mem_copy_all(ctx);
 }
 
-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
             ts_info(ts)->z_mask = ctx->z_mask;
         }
     }
+    return true;
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 /* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
         op->args[4] = arg_new_constant(ctx, bl);
         op->args[5] = arg_new_constant(ctx, bh);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_add2(OptContext *ctx, TCGOp *op)
--
2.43.0

tcg/optimize: Introduce const value accessors for TempOptInfo

Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)
--
2.43.0

tcg/optimize: Use fold_masks_zs in fold_and

Avoid the use of the OptContext slots.  Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1, z2;
+    uint64_t z1, z2, z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
-    z2 = arg_info(op->args[2])->z_mask;
-    ctx->z_mask = z1 & z2;
-
-    /*
-     * Sign repetitions are perforce all identical, whether they are 1 or 0.
-     * Bitwise operations preserve the relative quantity of the repetitions.
-     */
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z1 = t1->z_mask;
+    z2 = t2->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2]) &&
-        fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    z_mask = z1 & z2;
+
+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    s_mask = t1->s_mask & t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
--
2.43.0

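A worked example of the mask arithmetic (illustrative): if both inputs
are known to be sign-extended from bit 15, i.e. s_mask covers bits
63..15, then for the AND

    z_mask = z1 & z2    /* a result bit can be 1 only if both may be 1 */
    s_mask = s1 & s2    /* bits 63..15 still all match the msb */

so the result is likewise a 16-bit sign-extended value.
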
tcg/optimize: Use fold_masks_zs in fold_andc

Avoid the use of the OptContext slots.  Find TempOptInfo once.
Avoid double inversion of the value of second const operand.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer anything from it.
      */
-    if (arg_is_const(op->args[2])) {
-        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2)) {
+        uint64_t v2 = ti_const_val(t2);
+        if (fold_affected_mask(ctx, op, z_mask & v2)) {
             return true;
         }
-        z1 &= z2;
+        z_mask &= ~v2;
     }
-    ctx->z_mask = z1;
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
--
2.43.0

New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 10 +++++++---
7
tcg/optimize.c | 13 ++++++++++---
5
1 file changed, 7 insertions(+), 3 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
6
9
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
12
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
13
TCGReg rd, TCGReg rs)
14
{
15
- static const LoongArchInsn repl_insn[4] = {
16
- OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H, OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D
17
+ static const LoongArchInsn repl_insn[2][4] = {
18
+ { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
19
+ OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
20
+ { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
21
+ OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
22
};
23
+ bool lasx = type == TCG_TYPE_V256;
24
25
tcg_debug_assert(vece <= MO_64);
26
- tcg_out32(s, encode_vdj_insn(repl_insn[vece], rd, rs));
27
+ tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
28
return true;
15
return true;
29
}
16
}
30
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
31
--
53
--
32
2.34.1
54
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
diff view generated by jsdifflib
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
6
---
3
tcg/loongarch64/tcg-target.c.inc | 119 ++++++++++++++++---------------
7
tcg/optimize.c | 14 ++++++++++++--
4
1 file changed, 63 insertions(+), 56 deletions(-)
8
1 file changed, 12 insertions(+), 2 deletions(-)
5
9
6
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
9
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
11
tcg_out_ld(s, type, a0, a1, a2);
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
12
break;
30
break;
13
case INDEX_op_and_vec:
31
case TCG_TYPE_I64:
14
- tcg_out_opc_vand_v(s, a0, a1, a2);
32
and_opc = INDEX_op_and_i64;
15
- break;
33
+ width = 64;
16
+ insn = OPC_VAND_V;
17
+ goto vdvjvk;
18
case INDEX_op_andc_vec:
19
/*
20
* vandn vd, vj, vk: vd = vk & ~vj
21
* andc_vec vd, vj, vk: vd = vj & ~vk
22
- * vk and vk are swapped
23
+ * vj and vk are swapped
24
*/
25
- tcg_out_opc_vandn_v(s, a0, a2, a1);
26
- break;
27
+ a1 = a2;
28
+ a2 = args[1];
29
+ insn = OPC_VANDN_V;
30
+ goto vdvjvk;
31
case INDEX_op_or_vec:
32
- tcg_out_opc_vor_v(s, a0, a1, a2);
33
- break;
34
+ insn = OPC_VOR_V;
35
+ goto vdvjvk;
36
case INDEX_op_orc_vec:
37
- tcg_out_opc_vorn_v(s, a0, a1, a2);
38
- break;
39
+ insn = OPC_VORN_V;
40
+ goto vdvjvk;
41
case INDEX_op_xor_vec:
42
- tcg_out_opc_vxor_v(s, a0, a1, a2);
43
- break;
44
- case INDEX_op_nor_vec:
45
- tcg_out_opc_vnor_v(s, a0, a1, a2);
46
- break;
47
+ insn = OPC_VXOR_V;
48
+ goto vdvjvk;
49
case INDEX_op_not_vec:
50
- tcg_out_opc_vnor_v(s, a0, a1, a1);
51
- break;
52
+ a2 = a1;
53
+ /* fall through */
54
+ case INDEX_op_nor_vec:
55
+ insn = OPC_VNOR_V;
56
+ goto vdvjvk;
57
case INDEX_op_cmp_vec:
58
{
59
TCGCond cond = args[3];
60
+
61
if (const_args[2]) {
62
/*
63
* cmp_vec dest, src, value
64
* Try vseqi/vslei/vslti
65
*/
66
int64_t value = sextract64(a2, 0, 8 << vece);
67
- if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
68
- cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
69
- tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
70
- a0, a1, value));
71
+ if ((cond == TCG_COND_EQ ||
72
+ cond == TCG_COND_LE ||
73
+ cond == TCG_COND_LT) &&
74
+ (-0x10 <= value && value <= 0x0f)) {
75
+ insn = cmp_vec_imm_insn[cond][vece];
76
+ tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
77
break;
78
- } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
79
- (0x00 <= value && value <= 0x1f)) {
80
- tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
81
- a0, a1, value));
82
+ } else if ((cond == TCG_COND_LEU ||
83
+ cond == TCG_COND_LTU) &&
84
+ (0x00 <= value && value <= 0x1f)) {
85
+ insn = cmp_vec_imm_insn[cond][vece];
86
+ tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
87
break;
88
}
89
90
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
91
insn = cmp_vec_insn[cond][vece];
92
tcg_debug_assert(insn != 0);
93
}
94
- tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
95
}
96
- break;
97
+ goto vdvjvk;
98
case INDEX_op_add_vec:
99
tcg_out_addsub_vec(s, false, vece, a0, a1, a2, const_args[2], true);
100
break;
101
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
102
tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
103
break;
104
case INDEX_op_mul_vec:
105
- tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
106
- break;
107
+ insn = mul_vec_insn[vece];
108
+ goto vdvjvk;
109
case INDEX_op_smin_vec:
110
- tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
111
- break;
112
+ insn = smin_vec_insn[vece];
113
+ goto vdvjvk;
114
case INDEX_op_smax_vec:
115
- tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
116
- break;
117
+ insn = smax_vec_insn[vece];
118
+ goto vdvjvk;
119
case INDEX_op_umin_vec:
120
- tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
121
- break;
122
+ insn = umin_vec_insn[vece];
123
+ goto vdvjvk;
124
case INDEX_op_umax_vec:
125
- tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
126
- break;
127
+ insn = umax_vec_insn[vece];
128
+ goto vdvjvk;
129
case INDEX_op_ssadd_vec:
130
- tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
131
- break;
132
+ insn = ssadd_vec_insn[vece];
133
+ goto vdvjvk;
134
case INDEX_op_usadd_vec:
135
- tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
136
- break;
137
+ insn = usadd_vec_insn[vece];
138
+ goto vdvjvk;
139
case INDEX_op_sssub_vec:
140
- tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
141
- break;
142
+ insn = sssub_vec_insn[vece];
143
+ goto vdvjvk;
144
case INDEX_op_ussub_vec:
145
- tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
146
- break;
147
+ insn = ussub_vec_insn[vece];
148
+ goto vdvjvk;
149
case INDEX_op_shlv_vec:
150
- tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
151
- break;
152
+ insn = shlv_vec_insn[vece];
153
+ goto vdvjvk;
154
case INDEX_op_shrv_vec:
155
- tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
156
- break;
157
+ insn = shrv_vec_insn[vece];
158
+ goto vdvjvk;
159
case INDEX_op_sarv_vec:
160
- tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
161
- break;
162
+ insn = sarv_vec_insn[vece];
163
+ goto vdvjvk;
164
case INDEX_op_shli_vec:
165
tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
166
break;
167
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
168
case INDEX_op_sari_vec:
169
tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
170
break;
171
- case INDEX_op_rotrv_vec:
172
- tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
173
- break;
174
case INDEX_op_rotlv_vec:
175
/* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
176
tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
177
- tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
178
- temp_vec));
179
- break;
180
+ a2 = temp_vec;
181
+ /* fall through */
182
+ case INDEX_op_rotrv_vec:
183
+ insn = rotrv_vec_insn[vece];
184
+ goto vdvjvk;
185
case INDEX_op_rotli_vec:
186
/* rotli_vec a1, a2 = rotri_vec a1, -a2 */
187
a2 = extract32(-a2, 0, 3 + vece);
188
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
189
break;
34
break;
190
default:
35
default:
191
g_assert_not_reached();
36
g_assert_not_reached();
192
+ vdvjvk:
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
193
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
38
return fold_and(ctx, op);
194
+ break;
195
}
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
196
}
51
}
197
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
198
--
54
--
199
2.34.1
55
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
1
Each element size has a different encoding, so code cannot
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
be shared in the same way as with tcg_out_dup_vec.
2
Explicitly sign-extend z_mask instead of doing that manually.
3
3
4
Reviewed-by: Song Gao <gaosong@loongson.cn>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/loongarch64/tcg-target.c.inc | 30 ++++++++++++++++++++++++------
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 24 insertions(+), 6 deletions(-)
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
9
10
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
15
16
TCGReg r, TCGReg base, intptr_t offset)
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
17
{
18
- /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
- if (offset < -0x800 || offset > 0x7ff || \
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
+ bool lasx = type == TCG_TYPE_V256;
20
bool type_change = false;
21
+
21
+ TempOptInfo *t1;
22
+ /* Handle imm overflow and division (vldrepl.d imm is divided by 8). */
22
23
+ if (offset < -0x800 || offset > 0x7ff ||
23
if (fold_const1(ctx, op)) {
24
(offset & ((1 << vece) - 1)) != 0) {
24
return true;
25
tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
25
}
26
base = TCG_REG_TMP0;
26
27
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
27
- z_mask = arg_info(op->args[1])->z_mask;
28
28
- s_mask = arg_info(op->args[1])->s_mask;
29
switch (vece) {
29
+ t1 = arg_info(op->args[1]);
30
case MO_8:
30
+ z_mask = t1->z_mask;
31
- tcg_out_opc_vldrepl_b(s, r, base, offset);
31
+ s_mask = t1->s_mask;
32
+ if (lasx) {
32
s_mask_old = s_mask;
33
+ tcg_out_opc_xvldrepl_b(s, r, base, offset);
33
34
+ } else {
34
switch (op->opc) {
35
+ tcg_out_opc_vldrepl_b(s, r, base, offset);
35
CASE_OP_32_64(ext8s):
36
+ }
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
37
break;
40
break;
38
case MO_16:
41
CASE_OP_32_64(ext16s):
39
- tcg_out_opc_vldrepl_h(s, r, base, offset);
42
- sign = INT16_MIN;
40
+ if (lasx) {
43
- z_mask = (uint16_t)z_mask;
41
+ tcg_out_opc_xvldrepl_h(s, r, base, offset);
44
+ s_mask |= INT16_MIN;
42
+ } else {
45
+ z_mask = (int16_t)z_mask;
43
+ tcg_out_opc_vldrepl_h(s, r, base, offset);
44
+ }
45
break;
46
break;
46
case MO_32:
47
case INDEX_op_ext_i32_i64:
47
- tcg_out_opc_vldrepl_w(s, r, base, offset);
48
type_change = true;
48
+ if (lasx) {
49
QEMU_FALLTHROUGH;
49
+ tcg_out_opc_xvldrepl_w(s, r, base, offset);
50
case INDEX_op_ext32s_i64:
50
+ } else {
51
- sign = INT32_MIN;
51
+ tcg_out_opc_vldrepl_w(s, r, base, offset);
52
- z_mask = (uint32_t)z_mask;
52
+ }
53
+ s_mask |= INT32_MIN;
53
break;
54
+ z_mask = (int32_t)z_mask;
54
case MO_64:
55
- tcg_out_opc_vldrepl_d(s, r, base, offset);
56
+ if (lasx) {
57
+ tcg_out_opc_xvldrepl_d(s, r, base, offset);
58
+ } else {
59
+ tcg_out_opc_vldrepl_d(s, r, base, offset);
60
+ }
61
break;
55
break;
62
default:
56
default:
63
g_assert_not_reached();
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
64
--
76
--
65
2.34.1
77
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/loongarch64/tcg-target.c.inc | 223 +++++++++++++++++++------------
6
tcg/optimize.c | 19 +++++++++++--------
5
1 file changed, 137 insertions(+), 86 deletions(-)
7
1 file changed, 11 insertions(+), 8 deletions(-)
6
8
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
11
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
12
const int const_args[TCG_MAX_OP_ARGS])
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
13
{
16
{
14
TCGType type = vecl + TCG_TYPE_V64;
17
+ uint64_t z_mask, s_mask;
15
+ bool lasx = type == TCG_TYPE_V256;
18
+ TempOptInfo *tt, *ft;
16
TCGArg a0, a1, a2, a3;
19
int i;
17
-
20
18
- static const LoongArchInsn cmp_vec_insn[16][4] = {
21
/* If true and false values are the same, eliminate the cmp. */
19
- [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
20
- [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
21
- [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
24
}
22
- [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
25
23
- [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
24
- };
27
- | arg_info(op->args[4])->z_mask;
25
- static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
26
- [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
29
- & arg_info(op->args[4])->s_mask;
27
- [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
30
+ tt = arg_info(op->args[3]);
28
- [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
31
+ ft = arg_info(op->args[4]);
29
- [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
32
+ z_mask = tt->z_mask | ft->z_mask;
30
- [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
33
+ s_mask = tt->s_mask & ft->s_mask;
31
- };
34
32
LoongArchInsn insn;
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
33
- static const LoongArchInsn neg_vec_insn[4] = {
36
- uint64_t tv = arg_info(op->args[3])->val;
34
- OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
37
- uint64_t fv = arg_info(op->args[4])->val;
35
+
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
36
+ static const LoongArchInsn cmp_vec_insn[16][2][4] = {
39
+ uint64_t tv = ti_const_val(tt);
37
+ [TCG_COND_EQ] = {
40
+ uint64_t fv = ti_const_val(ft);
38
+ { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
41
TCGOpcode opc, negopc = 0;
39
+ { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
42
TCGCond cond = op->args[5];
40
+ },
43
41
+ [TCG_COND_LE] = {
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
42
+ { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
43
+ { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
44
+ },
45
+ [TCG_COND_LEU] = {
46
+ { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
47
+ { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
48
+ },
49
+ [TCG_COND_LT] = {
50
+ { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
51
+ { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
52
+ },
53
+ [TCG_COND_LTU] = {
54
+ { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
55
+ { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
56
+ }
57
};
58
- static const LoongArchInsn mul_vec_insn[4] = {
59
- OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
60
+ static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
61
+ [TCG_COND_EQ] = {
62
+ { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
63
+ { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
64
+ },
65
+ [TCG_COND_LE] = {
66
+ { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
67
+ { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
68
+ },
69
+ [TCG_COND_LEU] = {
70
+ { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
71
+ { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
72
+ },
73
+ [TCG_COND_LT] = {
74
+ { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
75
+ { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
76
+ },
77
+ [TCG_COND_LTU] = {
78
+ { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
79
+ { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
80
+ }
81
};
82
- static const LoongArchInsn smin_vec_insn[4] = {
83
- OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
84
+ static const LoongArchInsn neg_vec_insn[2][4] = {
85
+ { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
86
+ { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
87
};
88
- static const LoongArchInsn umin_vec_insn[4] = {
89
- OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
90
+ static const LoongArchInsn mul_vec_insn[2][4] = {
91
+ { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
92
+ { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
93
};
94
- static const LoongArchInsn smax_vec_insn[4] = {
95
- OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
96
+ static const LoongArchInsn smin_vec_insn[2][4] = {
97
+ { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
98
+ { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
99
};
100
- static const LoongArchInsn umax_vec_insn[4] = {
101
- OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
102
+ static const LoongArchInsn umin_vec_insn[2][4] = {
103
+ { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
104
+ { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
105
};
106
- static const LoongArchInsn ssadd_vec_insn[4] = {
107
- OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
108
+ static const LoongArchInsn smax_vec_insn[2][4] = {
109
+ { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
110
+ { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
111
};
112
- static const LoongArchInsn usadd_vec_insn[4] = {
113
- OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
114
+ static const LoongArchInsn umax_vec_insn[2][4] = {
115
+ { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
116
+ { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
117
};
118
- static const LoongArchInsn sssub_vec_insn[4] = {
119
- OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
120
+ static const LoongArchInsn ssadd_vec_insn[2][4] = {
121
+ { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
122
+ { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
123
};
124
- static const LoongArchInsn ussub_vec_insn[4] = {
125
- OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
126
+ static const LoongArchInsn usadd_vec_insn[2][4] = {
127
+ { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
128
+ { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
129
};
130
- static const LoongArchInsn shlv_vec_insn[4] = {
131
- OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
132
+ static const LoongArchInsn sssub_vec_insn[2][4] = {
133
+ { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
134
+ { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
135
};
136
- static const LoongArchInsn shrv_vec_insn[4] = {
137
- OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
138
+ static const LoongArchInsn ussub_vec_insn[2][4] = {
139
+ { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
140
+ { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
141
};
142
- static const LoongArchInsn sarv_vec_insn[4] = {
143
- OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
144
+ static const LoongArchInsn shlv_vec_insn[2][4] = {
145
+ { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
146
+ { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
147
};
148
- static const LoongArchInsn shli_vec_insn[4] = {
149
- OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
150
+ static const LoongArchInsn shrv_vec_insn[2][4] = {
151
+ { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
152
+ { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
153
};
154
- static const LoongArchInsn shri_vec_insn[4] = {
155
- OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
156
+ static const LoongArchInsn sarv_vec_insn[2][4] = {
157
+ { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
158
+ { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
159
};
160
- static const LoongArchInsn sari_vec_insn[4] = {
161
- OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
162
+ static const LoongArchInsn shli_vec_insn[2][4] = {
163
+ { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
164
+ { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
165
};
166
- static const LoongArchInsn rotrv_vec_insn[4] = {
167
- OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
168
+ static const LoongArchInsn shri_vec_insn[2][4] = {
169
+ { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
170
+ { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
171
};
172
- static const LoongArchInsn rotri_vec_insn[4] = {
173
- OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D
174
+ static const LoongArchInsn sari_vec_insn[2][4] = {
175
+ { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
176
+ { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
177
+ };
178
+ static const LoongArchInsn rotrv_vec_insn[2][4] = {
179
+ { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
180
+ { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
181
+ };
182
+ static const LoongArchInsn rotri_vec_insn[2][4] = {
183
+ { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
184
+ { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
185
};
186
187
a0 = args[0];
188
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
189
a2 = args[2];
190
a3 = args[3];
191
192
- /* Currently only supports V64 & V128 */
193
- tcg_debug_assert(type == TCG_TYPE_V64 || type == TCG_TYPE_V128);
194
-
195
switch (opc) {
196
case INDEX_op_st_vec:
197
tcg_out_st(s, type, a0, a1, a2);
198
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
199
tcg_out_ld(s, type, a0, a1, a2);
200
break;
201
case INDEX_op_and_vec:
202
- insn = OPC_VAND_V;
203
+ insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
204
goto vdvjvk;
205
case INDEX_op_andc_vec:
206
/*
207
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
208
*/
209
a1 = a2;
210
a2 = args[1];
211
- insn = OPC_VANDN_V;
212
+ insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
213
goto vdvjvk;
214
case INDEX_op_or_vec:
215
- insn = OPC_VOR_V;
216
+ insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
217
goto vdvjvk;
218
case INDEX_op_orc_vec:
219
- insn = OPC_VORN_V;
220
+ insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
221
goto vdvjvk;
222
case INDEX_op_xor_vec:
223
- insn = OPC_VXOR_V;
224
+ insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
225
goto vdvjvk;
226
case INDEX_op_not_vec:
227
a2 = a1;
228
/* fall through */
229
case INDEX_op_nor_vec:
230
- insn = OPC_VNOR_V;
231
+ insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
232
goto vdvjvk;
233
case INDEX_op_cmp_vec:
234
{
235
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
236
cond == TCG_COND_LE ||
237
cond == TCG_COND_LT) &&
238
(-0x10 <= value && value <= 0x0f)) {
239
- insn = cmp_vec_imm_insn[cond][vece];
240
+ insn = cmp_vec_imm_insn[cond][lasx][vece];
241
tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
242
break;
243
} else if ((cond == TCG_COND_LEU ||
244
cond == TCG_COND_LTU) &&
245
(0x00 <= value && value <= 0x1f)) {
246
- insn = cmp_vec_imm_insn[cond][vece];
247
+ insn = cmp_vec_imm_insn[cond][lasx][vece];
248
tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
249
break;
250
}
251
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
252
a2 = TCG_VEC_TMP0;
253
}
254
255
- insn = cmp_vec_insn[cond][vece];
256
+ insn = cmp_vec_insn[cond][lasx][vece];
257
if (insn == 0) {
258
TCGArg t;
259
t = a1, a1 = a2, a2 = t;
260
cond = tcg_swap_cond(cond);
261
- insn = cmp_vec_insn[cond][vece];
262
+ insn = cmp_vec_insn[cond][lasx][vece];
263
tcg_debug_assert(insn != 0);
264
}
45
}
265
}
46
}
266
goto vdvjvk;
47
}
267
case INDEX_op_add_vec:
48
- return false;
268
- tcg_out_addsub_vec(s, false, vece, a0, a1, a2, const_args[2], true);
49
+
269
+ tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
270
break;
51
}
271
case INDEX_op_sub_vec:
52
272
- tcg_out_addsub_vec(s, false, vece, a0, a1, a2, const_args[2], false);
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
273
+ tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
274
break;
275
case INDEX_op_neg_vec:
276
- tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
277
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
278
break;
279
case INDEX_op_mul_vec:
280
- insn = mul_vec_insn[vece];
281
+ insn = mul_vec_insn[lasx][vece];
282
goto vdvjvk;
283
case INDEX_op_smin_vec:
284
- insn = smin_vec_insn[vece];
285
+ insn = smin_vec_insn[lasx][vece];
286
goto vdvjvk;
287
case INDEX_op_smax_vec:
288
- insn = smax_vec_insn[vece];
289
+ insn = smax_vec_insn[lasx][vece];
290
goto vdvjvk;
291
case INDEX_op_umin_vec:
292
- insn = umin_vec_insn[vece];
293
+ insn = umin_vec_insn[lasx][vece];
294
goto vdvjvk;
295
case INDEX_op_umax_vec:
296
- insn = umax_vec_insn[vece];
297
+ insn = umax_vec_insn[lasx][vece];
298
goto vdvjvk;
299
case INDEX_op_ssadd_vec:
300
- insn = ssadd_vec_insn[vece];
301
+ insn = ssadd_vec_insn[lasx][vece];
302
goto vdvjvk;
303
case INDEX_op_usadd_vec:
304
- insn = usadd_vec_insn[vece];
305
+ insn = usadd_vec_insn[lasx][vece];
306
goto vdvjvk;
307
case INDEX_op_sssub_vec:
308
- insn = sssub_vec_insn[vece];
309
+ insn = sssub_vec_insn[lasx][vece];
310
goto vdvjvk;
311
case INDEX_op_ussub_vec:
312
- insn = ussub_vec_insn[vece];
313
+ insn = ussub_vec_insn[lasx][vece];
314
goto vdvjvk;
315
case INDEX_op_shlv_vec:
316
- insn = shlv_vec_insn[vece];
317
+ insn = shlv_vec_insn[lasx][vece];
318
goto vdvjvk;
319
case INDEX_op_shrv_vec:
320
- insn = shrv_vec_insn[vece];
321
+ insn = shrv_vec_insn[lasx][vece];
322
goto vdvjvk;
323
case INDEX_op_sarv_vec:
324
- insn = sarv_vec_insn[vece];
325
+ insn = sarv_vec_insn[lasx][vece];
326
goto vdvjvk;
327
case INDEX_op_rotlv_vec:
328
/* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
329
- tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], TCG_VEC_TMP0, a2));
330
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
331
+ TCG_VEC_TMP0, a2));
332
a2 = TCG_VEC_TMP0;
333
/* fall through */
334
case INDEX_op_rotrv_vec:
335
- insn = rotrv_vec_insn[vece];
336
+ insn = rotrv_vec_insn[lasx][vece];
337
goto vdvjvk;
338
case INDEX_op_shli_vec:
339
- insn = shli_vec_insn[vece];
340
+ insn = shli_vec_insn[lasx][vece];
341
goto vdvjukN;
342
case INDEX_op_shri_vec:
343
- insn = shri_vec_insn[vece];
344
+ insn = shri_vec_insn[lasx][vece];
345
goto vdvjukN;
346
case INDEX_op_sari_vec:
347
- insn = sari_vec_insn[vece];
348
+ insn = sari_vec_insn[lasx][vece];
349
goto vdvjukN;
350
case INDEX_op_rotli_vec:
351
/* rotli_vec a1, a2 = rotri_vec a1, -a2 */
352
a2 = extract32(-a2, 0, 3 + vece);
353
- insn = rotri_vec_insn[vece];
354
+ insn = rotri_vec_insn[lasx][vece];
355
goto vdvjukN;
356
case INDEX_op_bitsel_vec:
357
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
358
- tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
359
+ if (lasx) {
360
+ tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
361
+ } else {
362
+ tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
363
+ }
364
break;
365
case INDEX_op_dupm_vec:
366
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
367
--
54
--
368
2.34.1
55
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
diff view generated by jsdifflib
New patch
1
Stores have no output operands, and so need no further work.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 11 +++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
20
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
28
}
29
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
--
60
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
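A standalone sketch of the tri-state convention adopted here; fold_step
is a toy stand-in, not code from tcg/optimize.c:

    #include <stdio.h>

    /* Return 1 if finished, -1 if simplified, 0 if unchanged. */
    static int fold_step(int *v)
    {
        if (*v == 0) {
            return 1;         /* completely folded away */
        }
        if (*v % 2 == 0) {
            *v /= 2;          /* rewritten in place into a simpler form */
            return -1;
        }
        return 0;             /* untouched */
    }

    int main(void)
    {
        int v = 6;
        int i = fold_step(&v);
        if (i > 0) {
            return 0;         /* done: nothing else may inspect the op */
        }
        if (i == 0) {
            printf("try the next pattern\n");
        }
        /* i < 0: simplified, so skip the remaining pattern matches */
        printf("v = %d\n", v);
        return 0;
    }

The caller shape mirrors fold_setcond above: only a 0 return falls
through to fold_setcond_tst_pow2.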
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
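Why the z_mask is exactly 1 here -- a quick self-contained check, not
part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A setcond result is always 0 or 1, so every bit above bit 0
           is known zero; that is the z_mask of 1 passed above. */
        for (int a = -3; a <= 3; a++) {
            uint64_t r = (a < 0);
            assert((r & ~(uint64_t)1) == 0);
        }
        return 0;
    }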
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
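Similarly for negsetcond, a small illustration (not from the tree):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* negsetcond produces 0 or -1: every bit repeats the sign bit,
           which is what the all-ones s_mask above encodes. */
        for (int a = -3; a <= 3; a++) {
            int64_t r = -(int64_t)(a < 0);
            assert(r == 0 || r == -1);
        }
        return 0;
    }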
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
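The identity behind swapping cmpsel operands and inverting the
condition, as a standalone check (illustrative only):

    #include <assert.h>

    int main(void)
    {
        /* Selecting (t, f) on cond equals selecting (f, t) on !cond. */
        for (int a = 0; a < 4; a++) {
            for (int b = 0; b < 4; b++) {
                int t = 11, f = 22;
                assert(((a < b) ? t : f) == (!(a < b) ? f : t));
            }
        }
        return 0;
    }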
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
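The s_mask arithmetic above, rendered as a standalone program (the
sextract64 here is a local copy with QEMU's semantics):

    #include <inttypes.h>
    #include <stdio.h>

    /* Sign-extend the len-bit field at pos. */
    static int64_t sextract64(uint64_t x, int pos, int len)
    {
        return (int64_t)(x << (64 - len - pos)) >> (64 - len);
    }

    int main(void)
    {
        int pos = 8, len = 8;
        /* Input already sign-extended from bit 15: s_mask = -1ull << 15. */
        uint64_t s_mask_old = -1ull << 15;
        uint64_t s_mask = s_mask_old >> pos;  /* repetitions move down */
        s_mask |= -1ull << (len - 1);         /* result repeats from bit len-1 */
        printf("s_mask   = 0x%016" PRIx64 "\n", s_mask);
        printf("sextract = %" PRId64 "\n", sextract64(0xab00, pos, len));
        return 0;
    }

For these inputs s_mask comes out as 0xffffffffffffff80 and the
extracted value as -85.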
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
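The two right-shift cases above in miniature (this assumes the usual
arithmetic behaviour of signed right shift, which C leaves
implementation-defined):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t x = -0x80;            /* sign-extended from bit 7 */
        assert((x >> 4) == -8);       /* sar keeps the sign repetitions */

        uint32_t y = 0x70;            /* sign bit known zero */
        assert((y >> 4) == 0x7);      /* here shr behaves exactly like sar */
        return 0;
    }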
1
We can implement this with fld_d, fst_d for load and store,
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
and then use the normal v128 operations in registers.
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
This will improve support for guests which use v64.
3
will produce false.
4
4
5
Reviewed-by: Song Gao <gaosong@loongson.cn>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
tcg/loongarch64/tcg-target.h | 2 +-
8
tcg/optimize.c | 5 ++---
10
tcg/loongarch64/tcg-target.c.inc | 8 ++++++--
9
1 file changed, 2 insertions(+), 3 deletions(-)
11
2 files changed, 7 insertions(+), 3 deletions(-)
12
10
13
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/loongarch64/tcg-target.h
13
--- a/tcg/optimize.c
16
+++ b/tcg/loongarch64/tcg-target.h
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ typedef enum {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
18
16
19
#define TCG_TARGET_HAS_tst 0
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
20
18
{
21
-#define TCG_TARGET_HAS_v64 0
19
- uint64_t s_mask, z_mask, sign;
22
+#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
20
+ uint64_t s_mask, z_mask;
23
#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
21
TempOptInfo *t1, *t2;
24
#define TCG_TARGET_HAS_v256 0
22
25
23
if (fold_const2(ctx, op) ||
26
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
27
index XXXXXXX..XXXXXXX 100644
25
* If the sign bit is known zero, then logical right shift
28
--- a/tcg/loongarch64/tcg-target.c.inc
26
* will not reduce the number of input sign repetitions.
29
+++ b/tcg/loongarch64/tcg-target.c.inc
27
*/
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
28
- sign = -s_mask;
31
}
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
32
}
33
break;
33
break;
34
+ case TCG_TYPE_V64:
35
case TCG_TYPE_V128:
36
tcg_out_opc_vori_b(s, ret, arg, 0);
37
break;
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
39
}
40
break;
41
case TCG_TYPE_I64:
42
+ case TCG_TYPE_V64:
43
if (dest < TCG_REG_V0) {
44
tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
45
} else {
46
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
47
}
48
break;
49
case TCG_TYPE_I64:
50
+ case TCG_TYPE_V64:
51
if (src < TCG_REG_V0) {
52
tcg_out_ldst(s, OPC_ST_D, src, base, offset);
53
} else {
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
55
a2 = args[2];
56
a3 = args[3];
57
58
- /* Currently only supports V128 */
59
- tcg_debug_assert(type == TCG_TYPE_V128);
60
+ /* Currently only supports V64 & V128 */
61
+ tcg_debug_assert(type == TCG_TYPE_V64 || type == TCG_TYPE_V128);
62
63
switch (opc) {
64
case INDEX_op_st_vec:
65
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
66
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
67
68
if (cpuinfo & CPUINFO_LSX) {
69
+ tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
70
tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
71
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
72
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
73
--
34
--
74
2.34.1
35
2.43.0
75
76
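A brute-force check of the merged condition from the second commit
message above. It relies on -s_mask being either zero or a single bit,
which holds for the sign-extended s_mask representation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cases[] = { 0, -1ull << 7, -1ull << 15, -1ull };
        for (int i = 0; i < 4; i++) {
            uint64_t sign = -cases[i];       /* 0 or a single bit */
            for (uint64_t z_mask = 0; z_mask < 256; z_mask++) {
                int merged = (~z_mask & sign) != 0;
                int split  = sign != 0 && !(z_mask & sign);
                assert(merged == split);
            }
        }
        return 0;
    }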
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 36 ++++++++++++++++++--------------
7
tcg/optimize.c | 9 ++++++---
5
1 file changed, 20 insertions(+), 16 deletions(-)
8
1 file changed, 6 insertions(+), 3 deletions(-)
6
9
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
12
tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
13
}
20
}
14
21
15
-static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
16
- const TCGArg a1, const TCGArg a2,
17
+static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
18
+ TCGArg a0, TCGArg a1, TCGArg a2,
19
bool a2_is_const, bool is_add)
20
{
23
{
21
- static const LoongArchInsn add_vec_insn[4] = {
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
22
- OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
25
+ if (fold_const2(ctx, op) ||
23
+ static const LoongArchInsn add_vec_insn[2][4] = {
26
+ fold_xx_to_i(ctx, op, 0) ||
24
+ { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
27
+ fold_xi_to_x(ctx, op, 0) ||
25
+ { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
28
+ fold_sub_to_neg(ctx, op)) {
26
};
29
return true;
27
- static const LoongArchInsn add_vec_imm_insn[4] = {
28
- OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
29
+ static const LoongArchInsn add_vec_imm_insn[2][4] = {
30
+ { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
31
+ { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
32
};
33
- static const LoongArchInsn sub_vec_insn[4] = {
34
- OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
35
+ static const LoongArchInsn sub_vec_insn[2][4] = {
36
+ { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
37
+ { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
38
};
39
- static const LoongArchInsn sub_vec_imm_insn[4] = {
40
- OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
41
+ static const LoongArchInsn sub_vec_imm_insn[2][4] = {
42
+ { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
43
+ { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
44
};
45
LoongArchInsn insn;
46
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
48
value = -value;
49
}
50
if (value < 0) {
51
- insn = sub_vec_imm_insn[vece];
52
+ insn = sub_vec_imm_insn[lasx][vece];
53
value = -value;
54
} else {
55
- insn = add_vec_imm_insn[vece];
56
+ insn = add_vec_imm_insn[lasx][vece];
57
}
58
59
/* Constraint TCG_CT_CONST_VADD ensures validity. */
60
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
61
}
30
}
62
31
63
if (is_add) {
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
64
- insn = add_vec_insn[vece];
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
65
+ insn = add_vec_insn[lasx][vece];
34
op->args[2] = arg_new_constant(ctx, -val);
66
} else {
67
- insn = sub_vec_insn[vece];
68
+ insn = sub_vec_insn[lasx][vece];
69
}
35
}
70
tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
36
- return false;
37
+ return finish_folding(ctx, op);
71
}
38
}
72
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
39
73
}
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
74
break;
75
case INDEX_op_add_vec:
76
- tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
77
+ tcg_out_addsub_vec(s, false, vece, a0, a1, a2, const_args[2], true);
78
break;
79
case INDEX_op_sub_vec:
80
- tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
81
+ tcg_out_addsub_vec(s, false, vece, a0, a1, a2, const_args[2], false);
82
break;
83
case INDEX_op_neg_vec:
84
tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
85
--
41
--
86
2.34.1
42
2.43.0
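The rewrite visible in the fold_sub context above -- x - c becoming
x + (-c) -- is plain two's-complement arithmetic, e.g.:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Canonicalizing sub-by-constant as add lets the add folding
           paths handle both; wraparound makes it exact. */
        uint64_t x = 123, c = 7;
        assert(x - c == x + (uint64_t)-c);
        return 0;
    }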
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Avoid the use of the OptContext slots.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/loongarch64/tcg-target.c.inc | 36 +++++++++-----------------------
6
tcg/optimize.c | 16 +++++++++-------
5
1 file changed, 10 insertions(+), 26 deletions(-)
7
1 file changed, 9 insertions(+), 7 deletions(-)
6
8
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
11
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
12
case OPC_ST_D:
14
13
tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
14
break;
24
break;
15
+ case OPC_FLD_S:
25
CASE_OP_32_64(ld8u):
16
+ case OPC_FLD_D:
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
17
+ case OPC_FST_S:
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
18
+ case OPC_FST_D:
28
break;
19
+ tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
29
CASE_OP_32_64(ld16s):
20
+ break;
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
21
default:
45
default:
22
g_assert_not_reached();
46
g_assert_not_reached();
23
}
47
}
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
48
- return false;
25
if (dest < TCG_REG_V0) {
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
26
tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
50
}
27
} else {
51
28
- tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_32, dest, base, offset);
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
29
+ tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
30
}
31
break;
32
case TCG_TYPE_I64:
33
if (dest < TCG_REG_V0) {
34
tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
35
} else {
36
- tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_64, dest, base, offset);
37
+ tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
38
}
39
break;
40
case TCG_TYPE_V128:
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
42
if (src < TCG_REG_V0) {
43
tcg_out_ldst(s, OPC_ST_W, src, base, offset);
44
} else {
45
- /* TODO: Could use fst_s, fstx_s */
46
- if (offset < -0x100 || offset > 0xff || (offset & 3)) {
47
- if (-0x800 <= offset && offset <= 0x7ff) {
48
- tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset);
49
- } else {
50
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
51
- tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base);
52
- }
53
- base = TCG_REG_TMP0;
54
- offset = 0;
55
- }
56
- tcg_out_opc_vstelm_w(s, src, base, offset, 0);
57
+ tcg_out_ldst(s, OPC_FST_S, src, base, offset);
58
}
59
break;
60
case TCG_TYPE_I64:
61
if (src < TCG_REG_V0) {
62
tcg_out_ldst(s, OPC_ST_D, src, base, offset);
63
} else {
64
- /* TODO: Could use fst_d, fstx_d */
65
- if (offset < -0x100 || offset > 0xff || (offset & 7)) {
66
- if (-0x800 <= offset && offset <= 0x7ff) {
67
- tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset);
68
- } else {
69
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
70
- tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base);
71
- }
72
- base = TCG_REG_TMP0;
73
- offset = 0;
74
- }
75
- tcg_out_opc_vstelm_d(s, src, base, offset, 0);
76
+ tcg_out_ldst(s, OPC_FST_D, src, base, offset);
77
}
78
break;
79
case TCG_TYPE_V128:
80
--
53
--
81
2.34.1
54
2.43.0
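The representation switch above (MAKE_64BIT_MASK(8, 56) becoming
INT8_MIN and so on) as a standalone comparison; the values shown are
for the ld8s case:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Old form: an explicit mask of bits that repeat the msb. */
        uint64_t old_ld8s = ((1ull << 56) - 1) << 8;  /* bits 8..63 */
        /* New form: a sign-extended constant; INT8_MIN widens to
           0xffffffffffffff80, marking bits 7..63 as repetitions. */
        int64_t new_ld8s = INT8_MIN;
        printf("old: 0x%016" PRIx64 "\n", old_ld8s);
        printf("new: 0x%016" PRIx64 "\n", (uint64_t)new_ld8s);
        return 0;
    }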
1
From: Clément Chigot <chigot@adacore.com>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
The result has to be done with the signed denominator (b32) instead of
4
the unsigned value passed in argument (b).
5
6
Cc: qemu-stable@nongnu.org
7
Fixes: 1326010322d6 ("target/sparc: Remove CC_OP_DIV")
8
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2319
9
Signed-off-by: Clément Chigot <chigot@adacore.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20240606144331.698361-1-chigot@adacore.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
3
---
14
target/sparc/helper.c | 2 +-
4
tcg/optimize.c | 2 +-
15
1 file changed, 1 insertion(+), 1 deletion(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
16
6
17
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
19
--- a/target/sparc/helper.c
9
--- a/tcg/optimize.c
20
+++ b/target/sparc/helper.c
10
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ uint64_t helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
22
return (uint32_t)(b32 < 0 ? INT32_MAX : INT32_MIN) | (-1ull << 32);
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
23
}
17
}
24
18
25
- a64 /= b;
19
type = ctx->type;
26
+ a64 /= b32;
27
r = a64;
28
if (unlikely(r != a64)) {
29
return (uint32_t)(a64 < 0 ? INT32_MIN : INT32_MAX) | (-1ull << 32);
30
--
20
--
31
2.34.1
21
2.43.0
32
33
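The sdiv fix above, boiled down to the signed/unsigned mixup it
corrects (illustrative values, not the helper itself):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t a64 = 100;
        uint64_t b = 0xffffffffull;   /* 32-bit -1, zero-extended */
        int32_t b32 = (int32_t)b;     /* -1 */
        /* Dividing by b treats the denominator as 4294967295: */
        printf("a64 / b   = %" PRId64 "\n", a64 / (int64_t)b);  /* 0 */
        /* Dividing by b32 uses the intended signed value: */
        printf("a64 / b32 = %" PRId64 "\n", a64 / b32);         /* -100 */
        return 0;
    }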
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 29 +++++++++++++++--------------
7
tcg/optimize.c | 18 ++++++++----------
5
1 file changed, 15 insertions(+), 14 deletions(-)
8
1 file changed, 8 insertions(+), 10 deletions(-)
6
9
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
12
static const LoongArchInsn sub_vec_imm_insn[4] = {
15
return fold_masks_zs(ctx, op, -1, s_mask);
13
OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
16
}
14
};
17
15
+ LoongArchInsn insn;
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
16
19
-{
17
if (a2_is_const) {
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
18
int64_t value = sextract64(a2, 0, 8 << vece);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
19
+
32
+
20
if (!is_add) {
33
if (fold_const2_commutative(ctx, op) ||
21
value = -value;
34
fold_xx_to_i(ctx, op, 0) ||
22
}
35
fold_xi_to_x(ctx, op, 0) ||
23
-
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
24
- /* Try vaddi/vsubi */
37
return true;
25
- if (0 <= value && value <= 0x1f) {
26
- tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
27
- a1, value));
28
- return;
29
- } else if (-0x1f <= value && value < 0) {
30
- tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
31
- a1, -value));
32
- return;
33
+ if (value < 0) {
34
+ insn = sub_vec_imm_insn[vece];
35
+ value = -value;
36
+ } else {
37
+ insn = add_vec_imm_insn[vece];
38
}
39
40
- /* constraint TCG_CT_CONST_VADD ensures unreachable */
41
- g_assert_not_reached();
42
+ /* Constraint TCG_CT_CONST_VADD ensures validity. */
43
+ tcg_debug_assert(0 <= value && value <= 0x1f);
44
+
45
+ tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
46
+ return;
47
}
38
}
48
39
49
if (is_add) {
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
50
- tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
41
- | arg_info(op->args[2])->z_mask;
51
+ insn = add_vec_insn[vece];
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
52
} else {
43
- & arg_info(op->args[2])->s_mask;
53
- tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
44
- return fold_masks(ctx, op);
54
+ insn = sub_vec_insn[vece];
45
+ t1 = arg_info(op->args[1]);
55
}
46
+ t2 = arg_info(op->args[2]);
56
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
57
}
50
}
58
51
59
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
60
--
53
--
61
2.34.1
54
2.43.0
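The mask combination now used by fold_xor, checked in isolation (a
sketch, not the optimizer code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* For r = a ^ b, a result bit can be set only where a or b may
           be set, so the known-zero info combines as z_a | z_b. */
        uint64_t a = 0x0f, z_a = 0xff;   /* a fits in 8 bits */
        uint64_t b = 0x30, z_b = 0x3f;   /* b fits in 6 bits */
        uint64_t r = a ^ b;
        assert((r & ~(z_a | z_b)) == 0);
        return 0;
    }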
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
1
From: Anton Johansson <anjo@rev.ng>
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
2
3
3
For TBs crossing page boundaries, the 2nd page will never be
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
recorded/removed, as the index of the 2nd page is computed from the
5
address of the 1st page. This is due to a typo; fix it.
6
7
Cc: qemu-stable@nongnu.org
8
Fixes: deba78709a ("accel/tcg: Always lock pages before translation")
9
Signed-off-by: Anton Johansson <anjo@rev.ng>
10
Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Message-Id: <20240612133031.15298-1-anjo@rev.ng>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
6
---
16
accel/tcg/tb-maint.c | 4 ++--
7
tcg/optimize.c | 4 ++--
17
1 file changed, 2 insertions(+), 2 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
18
9
19
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/tb-maint.c
12
--- a/tcg/optimize.c
22
+++ b/accel/tcg/tb-maint.c
13
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ static void tb_record(TranslationBlock *tb)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
24
tb_page_addr_t paddr0 = tb_page_addr0(tb);
15
g_assert_not_reached();
25
tb_page_addr_t paddr1 = tb_page_addr1(tb);
16
}
26
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
17
27
- tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
20
return true;
30
assert(paddr0 != -1);
21
}
31
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
22
32
@@ -XXX,XX +XXX,XX @@ static void tb_remove(TranslationBlock *tb)
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
33
tb_page_addr_t paddr0 = tb_page_addr0(tb);
24
s_mask = s_mask_old >> pos;
34
tb_page_addr_t paddr1 = tb_page_addr1(tb);
25
s_mask |= -1ull << (len - 1);
35
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
26
36
- tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
37
+ tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
38
29
return true;
39
assert(paddr0 != -1);
30
}
40
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
31
41
--
32
--
42
2.34.1
33
2.43.0
43
44
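What re-enabling fold_affected_mask buys for sextract, sketched with
the mask algebra from the earlier patch (assumed values, standalone):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Input already sign-extended from bit 7; a sextract ...,0,16
           asks for less, so no bit is newly affected and the op can
           become a plain mov. */
        uint64_t s_mask_old = -1ull << 7;
        int pos = 0, len = 16;
        uint64_t s_mask = (s_mask_old >> pos) | (-1ull << (len - 1));
        assert((s_mask & ~s_mask_old) == 0);
        return 0;
    }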
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 22 +++++++++++++++++-----
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
5
1 file changed, 17 insertions(+), 5 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
6
9
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
12
switch (type) {
15
* 3) those that produce information about the result value.
13
case TCG_TYPE_I32:
16
*/
14
case TCG_TYPE_I64:
17
15
- /*
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
16
- * Conventional register-register move used in LoongArch is
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
17
- * `or dst, src, zero`.
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
18
- */
21
+
19
- tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
22
static bool fold_add(OptContext *ctx, TCGOp *op)
20
+ if (ret < TCG_REG_V0) {
23
{
21
+ if (arg < TCG_REG_V0) {
24
if (fold_const2_commutative(ctx, op) ||
22
+ /*
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
23
+ * Conventional register-register move used in LoongArch is
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
24
+ * `or dst, src, zero`.
27
}
25
+ */
28
26
+ tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
30
+{
31
+ /* If true and false values are the same, eliminate the cmp. */
32
+ if (args_are_copies(op->args[2], op->args[3])) {
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
34
+ }
35
+
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
37
+ uint64_t tv = arg_info(op->args[2])->val;
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
27
+ } else {
47
+ } else {
28
+ tcg_out_opc_movfr2gr_d(s, ret, arg);
48
+ op->opc = INDEX_op_xor_vec;
29
+ }
49
+ op->args[2] = arg_new_constant(ctx, -1);
30
+ } else {
50
+ return fold_xor(ctx, op);
31
+ if (arg < TCG_REG_V0) {
32
+ tcg_out_opc_movgr2fr_d(s, ret, arg);
33
+ } else {
34
+ tcg_out_opc_fmov_d(s, ret, arg);
35
+ }
51
+ }
36
+ }
52
+ }
37
break;
53
+ }
38
case TCG_TYPE_V128:
54
+ if (arg_is_const(op->args[2])) {
39
tcg_out_opc_vori_b(s, ret, arg, 0);
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
90
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
92
-{
93
- /* If true and false values are the same, eliminate the cmp. */
94
- if (args_are_copies(op->args[2], op->args[3])) {
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
96
- }
97
-
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
99
- uint64_t tv = arg_info(op->args[2])->val;
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
148
{
40
--
149
--
41
2.34.1
150
2.43.0
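For reference, the bitsel identities used by the fold_bitsel_vec code
moved above, verified standalone:

    #include <assert.h>
    #include <stdint.h>

    /* bitsel computes (c & t) | (~c & f) per bit. */
    static uint64_t bitsel(uint64_t c, uint64_t t, uint64_t f)
    {
        return (c & t) | (~c & f);
    }

    int main(void)
    {
        uint64_t c = 0xf0f0;
        assert(bitsel(c, -1ull, 0) == c);             /* tv==-1, fv==0: mov c */
        assert(bitsel(c, 0, -1ull) == (uint64_t)~c);  /* tv==0, fv==-1: not c */
        assert(bitsel(c, 0xff, 0xff) == 0xff);        /* t == f: cmp is dead */
        return 0;
    }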
1
The big comment just above says functions should be sorted.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
5
---
3
tcg/loongarch64/tcg-insn-defs.c.inc | 6181 +++++++++------------------
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
4
1 file changed, 2123 insertions(+), 4058 deletions(-)
7
1 file changed, 30 insertions(+), 30 deletions(-)
5
8
6
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
11
--- a/tcg/optimize.c
9
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
12
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
11
*
14
return true;
12
* This file is auto-generated by genqemutcgdefs from
13
* https://github.com/loongson-community/loongarch-opcodes,
14
- * from commit 8027da9a8157a8b47fc48ff1def292e09c5668bd.
15
+ * from commit 7f353fb69bd99ce6edfad7ad63948c4bb526f0bf.
16
* DO NOT EDIT.
17
*/
18
19
typedef enum {
20
+ OPC_MOVGR2SCR = 0x00000800,
21
+ OPC_MOVSCR2GR = 0x00000c00,
22
OPC_CLZ_W = 0x00001400,
23
OPC_CTZ_W = 0x00001c00,
24
OPC_CLZ_D = 0x00002400,
25
@@ -XXX,XX +XXX,XX @@ typedef enum {
26
OPC_SLL_D = 0x00188000,
27
OPC_SRL_D = 0x00190000,
28
OPC_SRA_D = 0x00198000,
29
+ OPC_ROTR_B = 0x001a0000,
30
+ OPC_ROTR_H = 0x001a8000,
31
OPC_ROTR_W = 0x001b0000,
32
OPC_ROTR_D = 0x001b8000,
33
OPC_MUL_W = 0x001c0000,
34
@@ -XXX,XX +XXX,XX @@ typedef enum {
35
OPC_SRLI_D = 0x00450000,
36
OPC_SRAI_W = 0x00488000,
37
OPC_SRAI_D = 0x00490000,
38
+ OPC_ROTRI_B = 0x004c2000,
39
+ OPC_ROTRI_H = 0x004c4000,
40
OPC_ROTRI_W = 0x004c8000,
41
OPC_ROTRI_D = 0x004d0000,
42
OPC_BSTRINS_W = 0x00600000,
43
OPC_BSTRPICK_W = 0x00608000,
44
OPC_BSTRINS_D = 0x00800000,
45
OPC_BSTRPICK_D = 0x00c00000,
46
+ OPC_FMOV_D = 0x01149800,
47
+ OPC_MOVGR2FR_D = 0x0114a800,
48
+ OPC_MOVFR2GR_D = 0x0114b800,
49
OPC_SLTI = 0x02000000,
50
OPC_SLTUI = 0x02400000,
51
OPC_ADDI_W = 0x02800000,
52
@@ -XXX,XX +XXX,XX @@ typedef enum {
53
OPC_ANDI = 0x03400000,
54
OPC_ORI = 0x03800000,
55
OPC_XORI = 0x03c00000,
56
- OPC_VFMADD_S = 0x09100000,
57
- OPC_VFMADD_D = 0x09200000,
58
- OPC_VFMSUB_S = 0x09500000,
59
- OPC_VFMSUB_D = 0x09600000,
60
- OPC_VFNMADD_S = 0x09900000,
61
- OPC_VFNMADD_D = 0x09a00000,
62
- OPC_VFNMSUB_S = 0x09d00000,
63
- OPC_VFNMSUB_D = 0x09e00000,
64
- OPC_VFCMP_CAF_S = 0x0c500000,
65
- OPC_VFCMP_SAF_S = 0x0c508000,
66
- OPC_VFCMP_CLT_S = 0x0c510000,
67
- OPC_VFCMP_SLT_S = 0x0c518000,
68
- OPC_VFCMP_CEQ_S = 0x0c520000,
69
- OPC_VFCMP_SEQ_S = 0x0c528000,
70
- OPC_VFCMP_CLE_S = 0x0c530000,
71
- OPC_VFCMP_SLE_S = 0x0c538000,
72
- OPC_VFCMP_CUN_S = 0x0c540000,
73
- OPC_VFCMP_SUN_S = 0x0c548000,
74
- OPC_VFCMP_CULT_S = 0x0c550000,
75
- OPC_VFCMP_SULT_S = 0x0c558000,
76
- OPC_VFCMP_CUEQ_S = 0x0c560000,
77
- OPC_VFCMP_SUEQ_S = 0x0c568000,
78
- OPC_VFCMP_CULE_S = 0x0c570000,
79
- OPC_VFCMP_SULE_S = 0x0c578000,
80
- OPC_VFCMP_CNE_S = 0x0c580000,
81
- OPC_VFCMP_SNE_S = 0x0c588000,
82
- OPC_VFCMP_COR_S = 0x0c5a0000,
83
- OPC_VFCMP_SOR_S = 0x0c5a8000,
84
- OPC_VFCMP_CUNE_S = 0x0c5c0000,
85
- OPC_VFCMP_SUNE_S = 0x0c5c8000,
86
- OPC_VFCMP_CAF_D = 0x0c600000,
87
- OPC_VFCMP_SAF_D = 0x0c608000,
88
- OPC_VFCMP_CLT_D = 0x0c610000,
89
- OPC_VFCMP_SLT_D = 0x0c618000,
90
- OPC_VFCMP_CEQ_D = 0x0c620000,
91
- OPC_VFCMP_SEQ_D = 0x0c628000,
92
- OPC_VFCMP_CLE_D = 0x0c630000,
93
- OPC_VFCMP_SLE_D = 0x0c638000,
94
- OPC_VFCMP_CUN_D = 0x0c640000,
95
- OPC_VFCMP_SUN_D = 0x0c648000,
96
- OPC_VFCMP_CULT_D = 0x0c650000,
97
- OPC_VFCMP_SULT_D = 0x0c658000,
98
- OPC_VFCMP_CUEQ_D = 0x0c660000,
99
- OPC_VFCMP_SUEQ_D = 0x0c668000,
100
- OPC_VFCMP_CULE_D = 0x0c670000,
101
- OPC_VFCMP_SULE_D = 0x0c678000,
102
- OPC_VFCMP_CNE_D = 0x0c680000,
103
- OPC_VFCMP_SNE_D = 0x0c688000,
104
- OPC_VFCMP_COR_D = 0x0c6a0000,
105
- OPC_VFCMP_SOR_D = 0x0c6a8000,
106
- OPC_VFCMP_CUNE_D = 0x0c6c0000,
107
- OPC_VFCMP_SUNE_D = 0x0c6c8000,
108
OPC_VBITSEL_V = 0x0d100000,
109
+ OPC_XVBITSEL_V = 0x0d200000,
110
OPC_VSHUF_B = 0x0d500000,
111
+ OPC_XVSHUF_B = 0x0d600000,
112
OPC_ADDU16I_D = 0x10000000,
113
OPC_LU12I_W = 0x14000000,
114
OPC_CU32I_D = 0x16000000,
115
@@ -XXX,XX +XXX,XX @@ typedef enum {
116
OPC_LD_BU = 0x2a000000,
117
OPC_LD_HU = 0x2a400000,
118
OPC_LD_WU = 0x2a800000,
119
+ OPC_FLD_S = 0x2b000000,
120
+ OPC_FST_S = 0x2b400000,
121
+ OPC_FLD_D = 0x2b800000,
122
+ OPC_FST_D = 0x2bc00000,
123
OPC_VLD = 0x2c000000,
124
OPC_VST = 0x2c400000,
125
+ OPC_XVLD = 0x2c800000,
126
+ OPC_XVST = 0x2cc00000,
127
OPC_VLDREPL_D = 0x30100000,
128
OPC_VLDREPL_W = 0x30200000,
129
OPC_VLDREPL_H = 0x30400000,
130
@@ -XXX,XX +XXX,XX @@ typedef enum {
131
OPC_VSTELM_W = 0x31200000,
132
OPC_VSTELM_H = 0x31400000,
133
OPC_VSTELM_B = 0x31800000,
134
+ OPC_XVLDREPL_D = 0x32100000,
135
+ OPC_XVLDREPL_W = 0x32200000,
136
+ OPC_XVLDREPL_H = 0x32400000,
137
+ OPC_XVLDREPL_B = 0x32800000,
138
+ OPC_XVSTELM_D = 0x33100000,
139
+ OPC_XVSTELM_W = 0x33200000,
140
+ OPC_XVSTELM_H = 0x33400000,
141
+ OPC_XVSTELM_B = 0x33800000,
142
OPC_LDX_B = 0x38000000,
143
OPC_LDX_H = 0x38040000,
144
OPC_LDX_W = 0x38080000,
145
@@ -XXX,XX +XXX,XX @@ typedef enum {
146
OPC_LDX_BU = 0x38200000,
147
OPC_LDX_HU = 0x38240000,
148
OPC_LDX_WU = 0x38280000,
149
+ OPC_FLDX_S = 0x38300000,
150
+ OPC_FLDX_D = 0x38340000,
151
+ OPC_FSTX_S = 0x38380000,
152
+ OPC_FSTX_D = 0x383c0000,
153
OPC_VLDX = 0x38400000,
154
OPC_VSTX = 0x38440000,
155
+ OPC_XVLDX = 0x38480000,
156
+ OPC_XVSTX = 0x384c0000,
157
OPC_DBAR = 0x38720000,
158
+ OPC_JISCR0 = 0x48000200,
159
+ OPC_JISCR1 = 0x48000300,
160
OPC_JIRL = 0x4c000000,
161
OPC_B = 0x50000000,
162
OPC_BL = 0x54000000,
163
@@ -XXX,XX +XXX,XX @@ typedef enum {
164
OPC_VSUB_H = 0x700c8000,
165
OPC_VSUB_W = 0x700d0000,
166
OPC_VSUB_D = 0x700d8000,
167
- OPC_VADDWEV_H_B = 0x701e0000,
168
- OPC_VADDWEV_W_H = 0x701e8000,
169
- OPC_VADDWEV_D_W = 0x701f0000,
170
- OPC_VADDWEV_Q_D = 0x701f8000,
171
- OPC_VSUBWEV_H_B = 0x70200000,
172
- OPC_VSUBWEV_W_H = 0x70208000,
173
- OPC_VSUBWEV_D_W = 0x70210000,
174
- OPC_VSUBWEV_Q_D = 0x70218000,
175
- OPC_VADDWOD_H_B = 0x70220000,
176
- OPC_VADDWOD_W_H = 0x70228000,
177
- OPC_VADDWOD_D_W = 0x70230000,
178
- OPC_VADDWOD_Q_D = 0x70238000,
179
- OPC_VSUBWOD_H_B = 0x70240000,
180
- OPC_VSUBWOD_W_H = 0x70248000,
181
- OPC_VSUBWOD_D_W = 0x70250000,
182
- OPC_VSUBWOD_Q_D = 0x70258000,
183
- OPC_VADDWEV_H_BU = 0x702e0000,
184
- OPC_VADDWEV_W_HU = 0x702e8000,
185
- OPC_VADDWEV_D_WU = 0x702f0000,
186
- OPC_VADDWEV_Q_DU = 0x702f8000,
187
- OPC_VSUBWEV_H_BU = 0x70300000,
188
- OPC_VSUBWEV_W_HU = 0x70308000,
189
- OPC_VSUBWEV_D_WU = 0x70310000,
190
- OPC_VSUBWEV_Q_DU = 0x70318000,
191
- OPC_VADDWOD_H_BU = 0x70320000,
192
- OPC_VADDWOD_W_HU = 0x70328000,
193
- OPC_VADDWOD_D_WU = 0x70330000,
194
- OPC_VADDWOD_Q_DU = 0x70338000,
195
- OPC_VSUBWOD_H_BU = 0x70340000,
196
- OPC_VSUBWOD_W_HU = 0x70348000,
197
- OPC_VSUBWOD_D_WU = 0x70350000,
198
- OPC_VSUBWOD_Q_DU = 0x70358000,
199
- OPC_VADDWEV_H_BU_B = 0x703e0000,
200
- OPC_VADDWEV_W_HU_H = 0x703e8000,
201
- OPC_VADDWEV_D_WU_W = 0x703f0000,
202
- OPC_VADDWEV_Q_DU_D = 0x703f8000,
203
- OPC_VADDWOD_H_BU_B = 0x70400000,
204
- OPC_VADDWOD_W_HU_H = 0x70408000,
205
- OPC_VADDWOD_D_WU_W = 0x70410000,
206
- OPC_VADDWOD_Q_DU_D = 0x70418000,
207
OPC_VSADD_B = 0x70460000,
208
OPC_VSADD_H = 0x70468000,
209
OPC_VSADD_W = 0x70470000,
210
@@ -XXX,XX +XXX,XX @@ typedef enum {
211
OPC_VSSUB_HU = 0x704c8000,
212
OPC_VSSUB_WU = 0x704d0000,
213
OPC_VSSUB_DU = 0x704d8000,
214
- OPC_VHADDW_H_B = 0x70540000,
215
- OPC_VHADDW_W_H = 0x70548000,
216
- OPC_VHADDW_D_W = 0x70550000,
217
- OPC_VHADDW_Q_D = 0x70558000,
218
- OPC_VHSUBW_H_B = 0x70560000,
219
- OPC_VHSUBW_W_H = 0x70568000,
220
- OPC_VHSUBW_D_W = 0x70570000,
221
- OPC_VHSUBW_Q_D = 0x70578000,
222
- OPC_VHADDW_HU_BU = 0x70580000,
223
- OPC_VHADDW_WU_HU = 0x70588000,
224
- OPC_VHADDW_DU_WU = 0x70590000,
225
- OPC_VHADDW_QU_DU = 0x70598000,
226
- OPC_VHSUBW_HU_BU = 0x705a0000,
227
- OPC_VHSUBW_WU_HU = 0x705a8000,
228
- OPC_VHSUBW_DU_WU = 0x705b0000,
229
- OPC_VHSUBW_QU_DU = 0x705b8000,
230
- OPC_VADDA_B = 0x705c0000,
231
- OPC_VADDA_H = 0x705c8000,
232
- OPC_VADDA_W = 0x705d0000,
233
- OPC_VADDA_D = 0x705d8000,
234
- OPC_VABSD_B = 0x70600000,
235
- OPC_VABSD_H = 0x70608000,
236
- OPC_VABSD_W = 0x70610000,
237
- OPC_VABSD_D = 0x70618000,
238
- OPC_VABSD_BU = 0x70620000,
239
- OPC_VABSD_HU = 0x70628000,
240
- OPC_VABSD_WU = 0x70630000,
241
- OPC_VABSD_DU = 0x70638000,
242
- OPC_VAVG_B = 0x70640000,
243
- OPC_VAVG_H = 0x70648000,
244
- OPC_VAVG_W = 0x70650000,
245
- OPC_VAVG_D = 0x70658000,
246
- OPC_VAVG_BU = 0x70660000,
247
- OPC_VAVG_HU = 0x70668000,
248
- OPC_VAVG_WU = 0x70670000,
249
- OPC_VAVG_DU = 0x70678000,
250
- OPC_VAVGR_B = 0x70680000,
251
- OPC_VAVGR_H = 0x70688000,
252
- OPC_VAVGR_W = 0x70690000,
253
- OPC_VAVGR_D = 0x70698000,
254
- OPC_VAVGR_BU = 0x706a0000,
255
- OPC_VAVGR_HU = 0x706a8000,
256
- OPC_VAVGR_WU = 0x706b0000,
257
- OPC_VAVGR_DU = 0x706b8000,
258
OPC_VMAX_B = 0x70700000,
259
OPC_VMAX_H = 0x70708000,
260
OPC_VMAX_W = 0x70710000,
261
@@ -XXX,XX +XXX,XX @@ typedef enum {
262
OPC_VMUL_H = 0x70848000,
263
OPC_VMUL_W = 0x70850000,
264
OPC_VMUL_D = 0x70858000,
265
- OPC_VMUH_B = 0x70860000,
266
- OPC_VMUH_H = 0x70868000,
267
- OPC_VMUH_W = 0x70870000,
268
- OPC_VMUH_D = 0x70878000,
269
- OPC_VMUH_BU = 0x70880000,
270
- OPC_VMUH_HU = 0x70888000,
271
- OPC_VMUH_WU = 0x70890000,
272
- OPC_VMUH_DU = 0x70898000,
273
- OPC_VMULWEV_H_B = 0x70900000,
274
- OPC_VMULWEV_W_H = 0x70908000,
275
- OPC_VMULWEV_D_W = 0x70910000,
276
- OPC_VMULWEV_Q_D = 0x70918000,
277
- OPC_VMULWOD_H_B = 0x70920000,
278
- OPC_VMULWOD_W_H = 0x70928000,
279
- OPC_VMULWOD_D_W = 0x70930000,
280
- OPC_VMULWOD_Q_D = 0x70938000,
281
- OPC_VMULWEV_H_BU = 0x70980000,
282
- OPC_VMULWEV_W_HU = 0x70988000,
283
- OPC_VMULWEV_D_WU = 0x70990000,
284
- OPC_VMULWEV_Q_DU = 0x70998000,
285
- OPC_VMULWOD_H_BU = 0x709a0000,
286
- OPC_VMULWOD_W_HU = 0x709a8000,
287
- OPC_VMULWOD_D_WU = 0x709b0000,
288
- OPC_VMULWOD_Q_DU = 0x709b8000,
289
- OPC_VMULWEV_H_BU_B = 0x70a00000,
290
- OPC_VMULWEV_W_HU_H = 0x70a08000,
291
- OPC_VMULWEV_D_WU_W = 0x70a10000,
292
- OPC_VMULWEV_Q_DU_D = 0x70a18000,
293
- OPC_VMULWOD_H_BU_B = 0x70a20000,
294
- OPC_VMULWOD_W_HU_H = 0x70a28000,
295
- OPC_VMULWOD_D_WU_W = 0x70a30000,
296
- OPC_VMULWOD_Q_DU_D = 0x70a38000,
297
- OPC_VMADD_B = 0x70a80000,
298
- OPC_VMADD_H = 0x70a88000,
299
- OPC_VMADD_W = 0x70a90000,
300
- OPC_VMADD_D = 0x70a98000,
301
- OPC_VMSUB_B = 0x70aa0000,
302
- OPC_VMSUB_H = 0x70aa8000,
303
- OPC_VMSUB_W = 0x70ab0000,
304
- OPC_VMSUB_D = 0x70ab8000,
305
- OPC_VMADDWEV_H_B = 0x70ac0000,
306
- OPC_VMADDWEV_W_H = 0x70ac8000,
307
- OPC_VMADDWEV_D_W = 0x70ad0000,
308
- OPC_VMADDWEV_Q_D = 0x70ad8000,
309
- OPC_VMADDWOD_H_B = 0x70ae0000,
310
- OPC_VMADDWOD_W_H = 0x70ae8000,
311
- OPC_VMADDWOD_D_W = 0x70af0000,
312
- OPC_VMADDWOD_Q_D = 0x70af8000,
313
- OPC_VMADDWEV_H_BU = 0x70b40000,
314
- OPC_VMADDWEV_W_HU = 0x70b48000,
315
- OPC_VMADDWEV_D_WU = 0x70b50000,
316
- OPC_VMADDWEV_Q_DU = 0x70b58000,
317
- OPC_VMADDWOD_H_BU = 0x70b60000,
318
- OPC_VMADDWOD_W_HU = 0x70b68000,
319
- OPC_VMADDWOD_D_WU = 0x70b70000,
320
- OPC_VMADDWOD_Q_DU = 0x70b78000,
321
- OPC_VMADDWEV_H_BU_B = 0x70bc0000,
322
- OPC_VMADDWEV_W_HU_H = 0x70bc8000,
323
- OPC_VMADDWEV_D_WU_W = 0x70bd0000,
324
- OPC_VMADDWEV_Q_DU_D = 0x70bd8000,
325
- OPC_VMADDWOD_H_BU_B = 0x70be0000,
326
- OPC_VMADDWOD_W_HU_H = 0x70be8000,
327
- OPC_VMADDWOD_D_WU_W = 0x70bf0000,
328
- OPC_VMADDWOD_Q_DU_D = 0x70bf8000,
329
- OPC_VDIV_B = 0x70e00000,
330
- OPC_VDIV_H = 0x70e08000,
331
- OPC_VDIV_W = 0x70e10000,
332
- OPC_VDIV_D = 0x70e18000,
333
- OPC_VMOD_B = 0x70e20000,
334
- OPC_VMOD_H = 0x70e28000,
335
- OPC_VMOD_W = 0x70e30000,
336
- OPC_VMOD_D = 0x70e38000,
337
- OPC_VDIV_BU = 0x70e40000,
338
- OPC_VDIV_HU = 0x70e48000,
339
- OPC_VDIV_WU = 0x70e50000,
340
- OPC_VDIV_DU = 0x70e58000,
341
- OPC_VMOD_BU = 0x70e60000,
342
- OPC_VMOD_HU = 0x70e68000,
343
- OPC_VMOD_WU = 0x70e70000,
344
- OPC_VMOD_DU = 0x70e78000,
345
OPC_VSLL_B = 0x70e80000,
346
OPC_VSLL_H = 0x70e88000,
347
OPC_VSLL_W = 0x70e90000,
348
@@ -XXX,XX +XXX,XX @@ typedef enum {
349
OPC_VROTR_H = 0x70ee8000,
350
OPC_VROTR_W = 0x70ef0000,
351
OPC_VROTR_D = 0x70ef8000,
352
- OPC_VSRLR_B = 0x70f00000,
353
- OPC_VSRLR_H = 0x70f08000,
354
- OPC_VSRLR_W = 0x70f10000,
355
- OPC_VSRLR_D = 0x70f18000,
356
- OPC_VSRAR_B = 0x70f20000,
357
- OPC_VSRAR_H = 0x70f28000,
358
- OPC_VSRAR_W = 0x70f30000,
359
- OPC_VSRAR_D = 0x70f38000,
360
- OPC_VSRLN_B_H = 0x70f48000,
361
- OPC_VSRLN_H_W = 0x70f50000,
362
- OPC_VSRLN_W_D = 0x70f58000,
363
- OPC_VSRAN_B_H = 0x70f68000,
364
- OPC_VSRAN_H_W = 0x70f70000,
365
- OPC_VSRAN_W_D = 0x70f78000,
366
- OPC_VSRLRN_B_H = 0x70f88000,
367
- OPC_VSRLRN_H_W = 0x70f90000,
368
- OPC_VSRLRN_W_D = 0x70f98000,
369
- OPC_VSRARN_B_H = 0x70fa8000,
370
- OPC_VSRARN_H_W = 0x70fb0000,
371
- OPC_VSRARN_W_D = 0x70fb8000,
372
- OPC_VSSRLN_B_H = 0x70fc8000,
373
- OPC_VSSRLN_H_W = 0x70fd0000,
374
- OPC_VSSRLN_W_D = 0x70fd8000,
375
- OPC_VSSRAN_B_H = 0x70fe8000,
376
- OPC_VSSRAN_H_W = 0x70ff0000,
377
- OPC_VSSRAN_W_D = 0x70ff8000,
378
- OPC_VSSRLRN_B_H = 0x71008000,
379
- OPC_VSSRLRN_H_W = 0x71010000,
380
- OPC_VSSRLRN_W_D = 0x71018000,
381
- OPC_VSSRARN_B_H = 0x71028000,
382
- OPC_VSSRARN_H_W = 0x71030000,
383
- OPC_VSSRARN_W_D = 0x71038000,
384
- OPC_VSSRLN_BU_H = 0x71048000,
385
- OPC_VSSRLN_HU_W = 0x71050000,
386
- OPC_VSSRLN_WU_D = 0x71058000,
387
- OPC_VSSRAN_BU_H = 0x71068000,
388
- OPC_VSSRAN_HU_W = 0x71070000,
389
- OPC_VSSRAN_WU_D = 0x71078000,
390
- OPC_VSSRLRN_BU_H = 0x71088000,
391
- OPC_VSSRLRN_HU_W = 0x71090000,
392
- OPC_VSSRLRN_WU_D = 0x71098000,
393
- OPC_VSSRARN_BU_H = 0x710a8000,
394
- OPC_VSSRARN_HU_W = 0x710b0000,
395
- OPC_VSSRARN_WU_D = 0x710b8000,
396
- OPC_VBITCLR_B = 0x710c0000,
397
- OPC_VBITCLR_H = 0x710c8000,
398
- OPC_VBITCLR_W = 0x710d0000,
399
- OPC_VBITCLR_D = 0x710d8000,
400
- OPC_VBITSET_B = 0x710e0000,
401
- OPC_VBITSET_H = 0x710e8000,
402
- OPC_VBITSET_W = 0x710f0000,
403
- OPC_VBITSET_D = 0x710f8000,
404
- OPC_VBITREV_B = 0x71100000,
405
- OPC_VBITREV_H = 0x71108000,
406
- OPC_VBITREV_W = 0x71110000,
407
- OPC_VBITREV_D = 0x71118000,
408
- OPC_VPACKEV_B = 0x71160000,
409
- OPC_VPACKEV_H = 0x71168000,
410
- OPC_VPACKEV_W = 0x71170000,
411
- OPC_VPACKEV_D = 0x71178000,
412
- OPC_VPACKOD_B = 0x71180000,
413
- OPC_VPACKOD_H = 0x71188000,
414
- OPC_VPACKOD_W = 0x71190000,
415
- OPC_VPACKOD_D = 0x71198000,
416
- OPC_VILVL_B = 0x711a0000,
417
- OPC_VILVL_H = 0x711a8000,
418
- OPC_VILVL_W = 0x711b0000,
419
- OPC_VILVL_D = 0x711b8000,
420
- OPC_VILVH_B = 0x711c0000,
421
- OPC_VILVH_H = 0x711c8000,
422
- OPC_VILVH_W = 0x711d0000,
423
- OPC_VILVH_D = 0x711d8000,
424
- OPC_VPICKEV_B = 0x711e0000,
425
- OPC_VPICKEV_H = 0x711e8000,
426
- OPC_VPICKEV_W = 0x711f0000,
427
- OPC_VPICKEV_D = 0x711f8000,
428
- OPC_VPICKOD_B = 0x71200000,
429
- OPC_VPICKOD_H = 0x71208000,
430
- OPC_VPICKOD_W = 0x71210000,
431
- OPC_VPICKOD_D = 0x71218000,
432
OPC_VREPLVE_B = 0x71220000,
433
OPC_VREPLVE_H = 0x71228000,
434
OPC_VREPLVE_W = 0x71230000,
435
@@ -XXX,XX +XXX,XX @@ typedef enum {
436
OPC_VNOR_V = 0x71278000,
437
OPC_VANDN_V = 0x71280000,
438
OPC_VORN_V = 0x71288000,
439
- OPC_VFRSTP_B = 0x712b0000,
440
- OPC_VFRSTP_H = 0x712b8000,
441
- OPC_VADD_Q = 0x712d0000,
442
- OPC_VSUB_Q = 0x712d8000,
443
- OPC_VSIGNCOV_B = 0x712e0000,
444
- OPC_VSIGNCOV_H = 0x712e8000,
445
- OPC_VSIGNCOV_W = 0x712f0000,
446
- OPC_VSIGNCOV_D = 0x712f8000,
447
- OPC_VFADD_S = 0x71308000,
448
- OPC_VFADD_D = 0x71310000,
449
- OPC_VFSUB_S = 0x71328000,
450
- OPC_VFSUB_D = 0x71330000,
451
- OPC_VFMUL_S = 0x71388000,
452
- OPC_VFMUL_D = 0x71390000,
453
- OPC_VFDIV_S = 0x713a8000,
454
- OPC_VFDIV_D = 0x713b0000,
455
- OPC_VFMAX_S = 0x713c8000,
456
- OPC_VFMAX_D = 0x713d0000,
457
- OPC_VFMIN_S = 0x713e8000,
458
- OPC_VFMIN_D = 0x713f0000,
459
- OPC_VFMAXA_S = 0x71408000,
460
- OPC_VFMAXA_D = 0x71410000,
461
- OPC_VFMINA_S = 0x71428000,
462
- OPC_VFMINA_D = 0x71430000,
463
- OPC_VFCVT_H_S = 0x71460000,
464
- OPC_VFCVT_S_D = 0x71468000,
465
- OPC_VFFINT_S_L = 0x71480000,
466
- OPC_VFTINT_W_D = 0x71498000,
467
- OPC_VFTINTRM_W_D = 0x714a0000,
468
- OPC_VFTINTRP_W_D = 0x714a8000,
469
- OPC_VFTINTRZ_W_D = 0x714b0000,
470
- OPC_VFTINTRNE_W_D = 0x714b8000,
471
- OPC_VSHUF_H = 0x717a8000,
472
- OPC_VSHUF_W = 0x717b0000,
473
- OPC_VSHUF_D = 0x717b8000,
474
OPC_VSEQI_B = 0x72800000,
475
OPC_VSEQI_H = 0x72808000,
476
OPC_VSEQI_W = 0x72810000,
477
@@ -XXX,XX +XXX,XX @@ typedef enum {
478
OPC_VSUBI_HU = 0x728c8000,
479
OPC_VSUBI_WU = 0x728d0000,
480
OPC_VSUBI_DU = 0x728d8000,
481
- OPC_VBSLL_V = 0x728e0000,
482
- OPC_VBSRL_V = 0x728e8000,
483
OPC_VMAXI_B = 0x72900000,
484
OPC_VMAXI_H = 0x72908000,
485
OPC_VMAXI_W = 0x72910000,
486
@@ -XXX,XX +XXX,XX @@ typedef enum {
487
OPC_VMINI_HU = 0x72968000,
488
OPC_VMINI_WU = 0x72970000,
489
OPC_VMINI_DU = 0x72978000,
490
- OPC_VFRSTPI_B = 0x729a0000,
491
- OPC_VFRSTPI_H = 0x729a8000,
492
- OPC_VCLO_B = 0x729c0000,
493
- OPC_VCLO_H = 0x729c0400,
494
- OPC_VCLO_W = 0x729c0800,
495
- OPC_VCLO_D = 0x729c0c00,
496
- OPC_VCLZ_B = 0x729c1000,
497
- OPC_VCLZ_H = 0x729c1400,
498
- OPC_VCLZ_W = 0x729c1800,
499
- OPC_VCLZ_D = 0x729c1c00,
500
- OPC_VPCNT_B = 0x729c2000,
501
- OPC_VPCNT_H = 0x729c2400,
502
- OPC_VPCNT_W = 0x729c2800,
503
- OPC_VPCNT_D = 0x729c2c00,
504
OPC_VNEG_B = 0x729c3000,
505
OPC_VNEG_H = 0x729c3400,
506
OPC_VNEG_W = 0x729c3800,
507
OPC_VNEG_D = 0x729c3c00,
508
- OPC_VMSKLTZ_B = 0x729c4000,
509
- OPC_VMSKLTZ_H = 0x729c4400,
510
- OPC_VMSKLTZ_W = 0x729c4800,
511
- OPC_VMSKLTZ_D = 0x729c4c00,
512
- OPC_VMSKGEZ_B = 0x729c5000,
513
- OPC_VMSKNZ_B = 0x729c6000,
514
- OPC_VSETEQZ_V = 0x729c9800,
515
- OPC_VSETNEZ_V = 0x729c9c00,
516
- OPC_VSETANYEQZ_B = 0x729ca000,
517
- OPC_VSETANYEQZ_H = 0x729ca400,
518
- OPC_VSETANYEQZ_W = 0x729ca800,
519
- OPC_VSETANYEQZ_D = 0x729cac00,
520
- OPC_VSETALLNEZ_B = 0x729cb000,
521
- OPC_VSETALLNEZ_H = 0x729cb400,
522
- OPC_VSETALLNEZ_W = 0x729cb800,
523
- OPC_VSETALLNEZ_D = 0x729cbc00,
524
- OPC_VFLOGB_S = 0x729cc400,
525
- OPC_VFLOGB_D = 0x729cc800,
526
- OPC_VFCLASS_S = 0x729cd400,
527
- OPC_VFCLASS_D = 0x729cd800,
528
- OPC_VFSQRT_S = 0x729ce400,
529
- OPC_VFSQRT_D = 0x729ce800,
530
- OPC_VFRECIP_S = 0x729cf400,
531
- OPC_VFRECIP_D = 0x729cf800,
532
- OPC_VFRSQRT_S = 0x729d0400,
533
- OPC_VFRSQRT_D = 0x729d0800,
534
- OPC_VFRINT_S = 0x729d3400,
535
- OPC_VFRINT_D = 0x729d3800,
536
- OPC_VFRINTRM_S = 0x729d4400,
537
- OPC_VFRINTRM_D = 0x729d4800,
538
- OPC_VFRINTRP_S = 0x729d5400,
539
- OPC_VFRINTRP_D = 0x729d5800,
540
- OPC_VFRINTRZ_S = 0x729d6400,
541
- OPC_VFRINTRZ_D = 0x729d6800,
542
- OPC_VFRINTRNE_S = 0x729d7400,
543
- OPC_VFRINTRNE_D = 0x729d7800,
544
- OPC_VFCVTL_S_H = 0x729de800,
545
- OPC_VFCVTH_S_H = 0x729dec00,
546
- OPC_VFCVTL_D_S = 0x729df000,
547
- OPC_VFCVTH_D_S = 0x729df400,
548
- OPC_VFFINT_S_W = 0x729e0000,
549
- OPC_VFFINT_S_WU = 0x729e0400,
550
- OPC_VFFINT_D_L = 0x729e0800,
551
- OPC_VFFINT_D_LU = 0x729e0c00,
552
- OPC_VFFINTL_D_W = 0x729e1000,
553
- OPC_VFFINTH_D_W = 0x729e1400,
554
- OPC_VFTINT_W_S = 0x729e3000,
555
- OPC_VFTINT_L_D = 0x729e3400,
556
- OPC_VFTINTRM_W_S = 0x729e3800,
557
- OPC_VFTINTRM_L_D = 0x729e3c00,
558
- OPC_VFTINTRP_W_S = 0x729e4000,
559
- OPC_VFTINTRP_L_D = 0x729e4400,
560
- OPC_VFTINTRZ_W_S = 0x729e4800,
561
- OPC_VFTINTRZ_L_D = 0x729e4c00,
562
- OPC_VFTINTRNE_W_S = 0x729e5000,
563
- OPC_VFTINTRNE_L_D = 0x729e5400,
564
- OPC_VFTINT_WU_S = 0x729e5800,
565
- OPC_VFTINT_LU_D = 0x729e5c00,
566
- OPC_VFTINTRZ_WU_S = 0x729e7000,
567
- OPC_VFTINTRZ_LU_D = 0x729e7400,
568
- OPC_VFTINTL_L_S = 0x729e8000,
569
- OPC_VFTINTH_L_S = 0x729e8400,
570
- OPC_VFTINTRML_L_S = 0x729e8800,
571
- OPC_VFTINTRMH_L_S = 0x729e8c00,
572
- OPC_VFTINTRPL_L_S = 0x729e9000,
573
- OPC_VFTINTRPH_L_S = 0x729e9400,
574
- OPC_VFTINTRZL_L_S = 0x729e9800,
575
- OPC_VFTINTRZH_L_S = 0x729e9c00,
576
- OPC_VFTINTRNEL_L_S = 0x729ea000,
577
- OPC_VFTINTRNEH_L_S = 0x729ea400,
578
- OPC_VEXTH_H_B = 0x729ee000,
579
- OPC_VEXTH_W_H = 0x729ee400,
580
- OPC_VEXTH_D_W = 0x729ee800,
581
- OPC_VEXTH_Q_D = 0x729eec00,
582
- OPC_VEXTH_HU_BU = 0x729ef000,
583
- OPC_VEXTH_WU_HU = 0x729ef400,
584
- OPC_VEXTH_DU_WU = 0x729ef800,
585
- OPC_VEXTH_QU_DU = 0x729efc00,
586
OPC_VREPLGR2VR_B = 0x729f0000,
587
OPC_VREPLGR2VR_H = 0x729f0400,
588
OPC_VREPLGR2VR_W = 0x729f0800,
589
@@ -XXX,XX +XXX,XX @@ typedef enum {
590
OPC_VROTRI_H = 0x72a04000,
591
OPC_VROTRI_W = 0x72a08000,
592
OPC_VROTRI_D = 0x72a10000,
593
- OPC_VSRLRI_B = 0x72a42000,
594
- OPC_VSRLRI_H = 0x72a44000,
595
- OPC_VSRLRI_W = 0x72a48000,
596
- OPC_VSRLRI_D = 0x72a50000,
597
- OPC_VSRARI_B = 0x72a82000,
598
- OPC_VSRARI_H = 0x72a84000,
599
- OPC_VSRARI_W = 0x72a88000,
600
- OPC_VSRARI_D = 0x72a90000,
601
OPC_VINSGR2VR_B = 0x72eb8000,
602
OPC_VINSGR2VR_H = 0x72ebc000,
603
OPC_VINSGR2VR_W = 0x72ebe000,
604
@@ -XXX,XX +XXX,XX @@ typedef enum {
605
OPC_VREPLVEI_H = 0x72f7c000,
606
OPC_VREPLVEI_W = 0x72f7e000,
607
OPC_VREPLVEI_D = 0x72f7f000,
608
- OPC_VSLLWIL_H_B = 0x73082000,
609
- OPC_VSLLWIL_W_H = 0x73084000,
610
- OPC_VSLLWIL_D_W = 0x73088000,
611
- OPC_VEXTL_Q_D = 0x73090000,
612
- OPC_VSLLWIL_HU_BU = 0x730c2000,
613
- OPC_VSLLWIL_WU_HU = 0x730c4000,
614
- OPC_VSLLWIL_DU_WU = 0x730c8000,
615
- OPC_VEXTL_QU_DU = 0x730d0000,
616
OPC_VBITCLRI_B = 0x73102000,
617
OPC_VBITCLRI_H = 0x73104000,
618
OPC_VBITCLRI_W = 0x73108000,
619
@@ -XXX,XX +XXX,XX @@ typedef enum {
620
OPC_VBITREVI_H = 0x73184000,
621
OPC_VBITREVI_W = 0x73188000,
622
OPC_VBITREVI_D = 0x73190000,
623
- OPC_VSAT_B = 0x73242000,
624
- OPC_VSAT_H = 0x73244000,
625
- OPC_VSAT_W = 0x73248000,
626
- OPC_VSAT_D = 0x73250000,
627
- OPC_VSAT_BU = 0x73282000,
628
- OPC_VSAT_HU = 0x73284000,
629
- OPC_VSAT_WU = 0x73288000,
630
- OPC_VSAT_DU = 0x73290000,
631
OPC_VSLLI_B = 0x732c2000,
632
OPC_VSLLI_H = 0x732c4000,
633
OPC_VSLLI_W = 0x732c8000,
634
@@ -XXX,XX +XXX,XX @@ typedef enum {
635
OPC_VSRAI_H = 0x73344000,
636
OPC_VSRAI_W = 0x73348000,
637
OPC_VSRAI_D = 0x73350000,
638
- OPC_VSRLNI_B_H = 0x73404000,
639
- OPC_VSRLNI_H_W = 0x73408000,
640
- OPC_VSRLNI_W_D = 0x73410000,
641
- OPC_VSRLNI_D_Q = 0x73420000,
642
- OPC_VSRLRNI_B_H = 0x73444000,
643
- OPC_VSRLRNI_H_W = 0x73448000,
644
- OPC_VSRLRNI_W_D = 0x73450000,
645
- OPC_VSRLRNI_D_Q = 0x73460000,
646
- OPC_VSSRLNI_B_H = 0x73484000,
647
- OPC_VSSRLNI_H_W = 0x73488000,
648
- OPC_VSSRLNI_W_D = 0x73490000,
649
- OPC_VSSRLNI_D_Q = 0x734a0000,
650
- OPC_VSSRLNI_BU_H = 0x734c4000,
651
- OPC_VSSRLNI_HU_W = 0x734c8000,
652
- OPC_VSSRLNI_WU_D = 0x734d0000,
653
- OPC_VSSRLNI_DU_Q = 0x734e0000,
654
- OPC_VSSRLRNI_B_H = 0x73504000,
655
- OPC_VSSRLRNI_H_W = 0x73508000,
656
- OPC_VSSRLRNI_W_D = 0x73510000,
657
- OPC_VSSRLRNI_D_Q = 0x73520000,
658
- OPC_VSSRLRNI_BU_H = 0x73544000,
659
- OPC_VSSRLRNI_HU_W = 0x73548000,
660
- OPC_VSSRLRNI_WU_D = 0x73550000,
661
- OPC_VSSRLRNI_DU_Q = 0x73560000,
662
- OPC_VSRANI_B_H = 0x73584000,
663
- OPC_VSRANI_H_W = 0x73588000,
664
- OPC_VSRANI_W_D = 0x73590000,
665
- OPC_VSRANI_D_Q = 0x735a0000,
666
- OPC_VSRARNI_B_H = 0x735c4000,
667
- OPC_VSRARNI_H_W = 0x735c8000,
668
- OPC_VSRARNI_W_D = 0x735d0000,
669
- OPC_VSRARNI_D_Q = 0x735e0000,
670
- OPC_VSSRANI_B_H = 0x73604000,
671
- OPC_VSSRANI_H_W = 0x73608000,
672
- OPC_VSSRANI_W_D = 0x73610000,
673
- OPC_VSSRANI_D_Q = 0x73620000,
674
- OPC_VSSRANI_BU_H = 0x73644000,
675
- OPC_VSSRANI_HU_W = 0x73648000,
676
- OPC_VSSRANI_WU_D = 0x73650000,
677
- OPC_VSSRANI_DU_Q = 0x73660000,
678
- OPC_VSSRARNI_B_H = 0x73684000,
679
- OPC_VSSRARNI_H_W = 0x73688000,
680
- OPC_VSSRARNI_W_D = 0x73690000,
681
- OPC_VSSRARNI_D_Q = 0x736a0000,
682
- OPC_VSSRARNI_BU_H = 0x736c4000,
683
- OPC_VSSRARNI_HU_W = 0x736c8000,
684
- OPC_VSSRARNI_WU_D = 0x736d0000,
685
- OPC_VSSRARNI_DU_Q = 0x736e0000,
686
- OPC_VEXTRINS_D = 0x73800000,
687
- OPC_VEXTRINS_W = 0x73840000,
688
- OPC_VEXTRINS_H = 0x73880000,
689
- OPC_VEXTRINS_B = 0x738c0000,
690
- OPC_VSHUF4I_B = 0x73900000,
691
- OPC_VSHUF4I_H = 0x73940000,
692
- OPC_VSHUF4I_W = 0x73980000,
693
- OPC_VSHUF4I_D = 0x739c0000,
694
OPC_VBITSELI_B = 0x73c40000,
695
OPC_VANDI_B = 0x73d00000,
696
OPC_VORI_B = 0x73d40000,
697
OPC_VXORI_B = 0x73d80000,
698
OPC_VNORI_B = 0x73dc0000,
699
OPC_VLDI = 0x73e00000,
700
- OPC_VPERMI_W = 0x73e40000,
701
+ OPC_XVSEQ_B = 0x74000000,
702
+ OPC_XVSEQ_H = 0x74008000,
703
+ OPC_XVSEQ_W = 0x74010000,
704
+ OPC_XVSEQ_D = 0x74018000,
705
+ OPC_XVSLE_B = 0x74020000,
706
+ OPC_XVSLE_H = 0x74028000,
707
+ OPC_XVSLE_W = 0x74030000,
708
+ OPC_XVSLE_D = 0x74038000,
709
+ OPC_XVSLE_BU = 0x74040000,
710
+ OPC_XVSLE_HU = 0x74048000,
711
+ OPC_XVSLE_WU = 0x74050000,
712
+ OPC_XVSLE_DU = 0x74058000,
713
+ OPC_XVSLT_B = 0x74060000,
714
+ OPC_XVSLT_H = 0x74068000,
715
+ OPC_XVSLT_W = 0x74070000,
716
+ OPC_XVSLT_D = 0x74078000,
717
+ OPC_XVSLT_BU = 0x74080000,
718
+ OPC_XVSLT_HU = 0x74088000,
719
+ OPC_XVSLT_WU = 0x74090000,
720
+ OPC_XVSLT_DU = 0x74098000,
721
+ OPC_XVADD_B = 0x740a0000,
722
+ OPC_XVADD_H = 0x740a8000,
723
+ OPC_XVADD_W = 0x740b0000,
724
+ OPC_XVADD_D = 0x740b8000,
725
+ OPC_XVSUB_B = 0x740c0000,
726
+ OPC_XVSUB_H = 0x740c8000,
727
+ OPC_XVSUB_W = 0x740d0000,
728
+ OPC_XVSUB_D = 0x740d8000,
729
+ OPC_XVSADD_B = 0x74460000,
730
+ OPC_XVSADD_H = 0x74468000,
731
+ OPC_XVSADD_W = 0x74470000,
732
+ OPC_XVSADD_D = 0x74478000,
733
+ OPC_XVSSUB_B = 0x74480000,
734
+ OPC_XVSSUB_H = 0x74488000,
735
+ OPC_XVSSUB_W = 0x74490000,
736
+ OPC_XVSSUB_D = 0x74498000,
737
+ OPC_XVSADD_BU = 0x744a0000,
738
+ OPC_XVSADD_HU = 0x744a8000,
739
+ OPC_XVSADD_WU = 0x744b0000,
740
+ OPC_XVSADD_DU = 0x744b8000,
741
+ OPC_XVSSUB_BU = 0x744c0000,
742
+ OPC_XVSSUB_HU = 0x744c8000,
743
+ OPC_XVSSUB_WU = 0x744d0000,
744
+ OPC_XVSSUB_DU = 0x744d8000,
745
+ OPC_XVMAX_B = 0x74700000,
746
+ OPC_XVMAX_H = 0x74708000,
747
+ OPC_XVMAX_W = 0x74710000,
748
+ OPC_XVMAX_D = 0x74718000,
749
+ OPC_XVMIN_B = 0x74720000,
750
+ OPC_XVMIN_H = 0x74728000,
751
+ OPC_XVMIN_W = 0x74730000,
752
+ OPC_XVMIN_D = 0x74738000,
753
+ OPC_XVMAX_BU = 0x74740000,
754
+ OPC_XVMAX_HU = 0x74748000,
755
+ OPC_XVMAX_WU = 0x74750000,
756
+ OPC_XVMAX_DU = 0x74758000,
757
+ OPC_XVMIN_BU = 0x74760000,
758
+ OPC_XVMIN_HU = 0x74768000,
759
+ OPC_XVMIN_WU = 0x74770000,
760
+ OPC_XVMIN_DU = 0x74778000,
761
+ OPC_XVMUL_B = 0x74840000,
762
+ OPC_XVMUL_H = 0x74848000,
763
+ OPC_XVMUL_W = 0x74850000,
764
+ OPC_XVMUL_D = 0x74858000,
765
+ OPC_XVSLL_B = 0x74e80000,
766
+ OPC_XVSLL_H = 0x74e88000,
767
+ OPC_XVSLL_W = 0x74e90000,
768
+ OPC_XVSLL_D = 0x74e98000,
769
+ OPC_XVSRL_B = 0x74ea0000,
770
+ OPC_XVSRL_H = 0x74ea8000,
771
+ OPC_XVSRL_W = 0x74eb0000,
772
+ OPC_XVSRL_D = 0x74eb8000,
773
+ OPC_XVSRA_B = 0x74ec0000,
774
+ OPC_XVSRA_H = 0x74ec8000,
775
+ OPC_XVSRA_W = 0x74ed0000,
776
+ OPC_XVSRA_D = 0x74ed8000,
777
+ OPC_XVROTR_B = 0x74ee0000,
778
+ OPC_XVROTR_H = 0x74ee8000,
779
+ OPC_XVROTR_W = 0x74ef0000,
780
+ OPC_XVROTR_D = 0x74ef8000,
781
+ OPC_XVREPLVE_B = 0x75220000,
782
+ OPC_XVREPLVE_H = 0x75228000,
783
+ OPC_XVREPLVE_W = 0x75230000,
784
+ OPC_XVREPLVE_D = 0x75238000,
785
+ OPC_XVAND_V = 0x75260000,
786
+ OPC_XVOR_V = 0x75268000,
787
+ OPC_XVXOR_V = 0x75270000,
788
+ OPC_XVNOR_V = 0x75278000,
789
+ OPC_XVANDN_V = 0x75280000,
790
+ OPC_XVORN_V = 0x75288000,
791
+ OPC_XVSEQI_B = 0x76800000,
792
+ OPC_XVSEQI_H = 0x76808000,
793
+ OPC_XVSEQI_W = 0x76810000,
794
+ OPC_XVSEQI_D = 0x76818000,
795
+ OPC_XVSLEI_B = 0x76820000,
796
+ OPC_XVSLEI_H = 0x76828000,
797
+ OPC_XVSLEI_W = 0x76830000,
798
+ OPC_XVSLEI_D = 0x76838000,
799
+ OPC_XVSLEI_BU = 0x76840000,
800
+ OPC_XVSLEI_HU = 0x76848000,
801
+ OPC_XVSLEI_WU = 0x76850000,
802
+ OPC_XVSLEI_DU = 0x76858000,
803
+ OPC_XVSLTI_B = 0x76860000,
804
+ OPC_XVSLTI_H = 0x76868000,
805
+ OPC_XVSLTI_W = 0x76870000,
806
+ OPC_XVSLTI_D = 0x76878000,
807
+ OPC_XVSLTI_BU = 0x76880000,
808
+ OPC_XVSLTI_HU = 0x76888000,
809
+ OPC_XVSLTI_WU = 0x76890000,
810
+ OPC_XVSLTI_DU = 0x76898000,
811
+ OPC_XVADDI_BU = 0x768a0000,
812
+ OPC_XVADDI_HU = 0x768a8000,
813
+ OPC_XVADDI_WU = 0x768b0000,
814
+ OPC_XVADDI_DU = 0x768b8000,
815
+ OPC_XVSUBI_BU = 0x768c0000,
816
+ OPC_XVSUBI_HU = 0x768c8000,
817
+ OPC_XVSUBI_WU = 0x768d0000,
818
+ OPC_XVSUBI_DU = 0x768d8000,
819
+ OPC_XVMAXI_B = 0x76900000,
820
+ OPC_XVMAXI_H = 0x76908000,
821
+ OPC_XVMAXI_W = 0x76910000,
822
+ OPC_XVMAXI_D = 0x76918000,
823
+ OPC_XVMINI_B = 0x76920000,
824
+ OPC_XVMINI_H = 0x76928000,
825
+ OPC_XVMINI_W = 0x76930000,
826
+ OPC_XVMINI_D = 0x76938000,
827
+ OPC_XVMAXI_BU = 0x76940000,
828
+ OPC_XVMAXI_HU = 0x76948000,
829
+ OPC_XVMAXI_WU = 0x76950000,
830
+ OPC_XVMAXI_DU = 0x76958000,
831
+ OPC_XVMINI_BU = 0x76960000,
832
+ OPC_XVMINI_HU = 0x76968000,
833
+ OPC_XVMINI_WU = 0x76970000,
834
+ OPC_XVMINI_DU = 0x76978000,
835
+ OPC_XVNEG_B = 0x769c3000,
836
+ OPC_XVNEG_H = 0x769c3400,
837
+ OPC_XVNEG_W = 0x769c3800,
838
+ OPC_XVNEG_D = 0x769c3c00,
839
+ OPC_XVREPLGR2VR_B = 0x769f0000,
840
+ OPC_XVREPLGR2VR_H = 0x769f0400,
841
+ OPC_XVREPLGR2VR_W = 0x769f0800,
842
+ OPC_XVREPLGR2VR_D = 0x769f0c00,
843
+ OPC_XVROTRI_B = 0x76a02000,
844
+ OPC_XVROTRI_H = 0x76a04000,
845
+ OPC_XVROTRI_W = 0x76a08000,
846
+ OPC_XVROTRI_D = 0x76a10000,
847
+ OPC_XVINSGR2VR_W = 0x76ebc000,
848
+ OPC_XVINSGR2VR_D = 0x76ebe000,
849
+ OPC_XVPICKVE2GR_W = 0x76efc000,
850
+ OPC_XVPICKVE2GR_D = 0x76efe000,
851
+ OPC_XVPICKVE2GR_WU = 0x76f3c000,
852
+ OPC_XVPICKVE2GR_DU = 0x76f3e000,
853
+ OPC_XVREPL128VEI_B = 0x76f78000,
854
+ OPC_XVREPL128VEI_H = 0x76f7c000,
855
+ OPC_XVREPL128VEI_W = 0x76f7e000,
856
+ OPC_XVREPL128VEI_D = 0x76f7f000,
857
+ OPC_XVREPLVE0_B = 0x77070000,
858
+ OPC_XVREPLVE0_H = 0x77078000,
859
+ OPC_XVREPLVE0_W = 0x7707c000,
860
+ OPC_XVREPLVE0_D = 0x7707e000,
861
+ OPC_XVREPLVE0_Q = 0x7707f000,
862
+ OPC_XVBITCLRI_B = 0x77102000,
863
+ OPC_XVBITCLRI_H = 0x77104000,
864
+ OPC_XVBITCLRI_W = 0x77108000,
865
+ OPC_XVBITCLRI_D = 0x77110000,
866
+ OPC_XVBITSETI_B = 0x77142000,
867
+ OPC_XVBITSETI_H = 0x77144000,
868
+ OPC_XVBITSETI_W = 0x77148000,
869
+ OPC_XVBITSETI_D = 0x77150000,
870
+ OPC_XVBITREVI_B = 0x77182000,
871
+ OPC_XVBITREVI_H = 0x77184000,
872
+ OPC_XVBITREVI_W = 0x77188000,
873
+ OPC_XVBITREVI_D = 0x77190000,
874
+ OPC_XVSLLI_B = 0x772c2000,
875
+ OPC_XVSLLI_H = 0x772c4000,
876
+ OPC_XVSLLI_W = 0x772c8000,
877
+ OPC_XVSLLI_D = 0x772d0000,
878
+ OPC_XVSRLI_B = 0x77302000,
879
+ OPC_XVSRLI_H = 0x77304000,
880
+ OPC_XVSRLI_W = 0x77308000,
881
+ OPC_XVSRLI_D = 0x77310000,
882
+ OPC_XVSRAI_B = 0x77342000,
883
+ OPC_XVSRAI_H = 0x77344000,
884
+ OPC_XVSRAI_W = 0x77348000,
885
+ OPC_XVSRAI_D = 0x77350000,
886
+ OPC_XVBITSELI_B = 0x77c40000,
887
+ OPC_XVANDI_B = 0x77d00000,
888
+ OPC_XVORI_B = 0x77d40000,
889
+ OPC_XVXORI_B = 0x77d80000,
890
+ OPC_XVNORI_B = 0x77dc0000,
891
+ OPC_XVLDI = 0x77e00000,
892
} LoongArchInsn;
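
As an aside for review: each OPC_* value above is a complete 32-bit instruction word with its operand slots left at zero, so the encode_* helpers that follow only need to OR the register and immediate fields into place. A minimal sketch of that packing (the exact slot layout is an assumption for illustration, not taken from this patch):

#include <stdint.h>

/* Illustrative only: pack d/j/k into the conventional 5-bit slots. */
static inline int32_t example_pack_djk(int32_t opc, uint32_t d,
                                       uint32_t j, uint32_t k)
{
    /* d in bits [4:0], j in [9:5], k in [14:10]; opc holds the rest */
    return opc | (int32_t)(d | (j << 5) | (k << 10));
}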
893
894
static int32_t __attribute__((unused))
895
@@ -XXX,XX +XXX,XX @@ encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
896
}
897
898
static int32_t __attribute__((unused))
899
-encode_cdvj_insn(LoongArchInsn opc, TCGReg cd, TCGReg vj)
900
+encode_dfj_insn(LoongArchInsn opc, TCGReg d, TCGReg fj)
901
{
902
- tcg_debug_assert(cd >= 0 && cd <= 0x7);
903
- tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
904
- return encode_dj_slots(opc, cd, vj & 0x1f);
905
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
906
+ tcg_debug_assert(fj >= 0x20 && fj <= 0x3f);
907
+ return encode_dj_slots(opc, d, fj & 0x1f);
908
}
909
910
static int32_t __attribute__((unused))
911
@@ -XXX,XX +XXX,XX @@ encode_djuk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk12)
912
return encode_djk_slots(opc, d, j, uk12);
913
}
914
915
+static int32_t __attribute__((unused))
916
+encode_djuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk3)
917
+{
918
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
919
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
920
+ tcg_debug_assert(uk3 <= 0x7);
921
+ return encode_djk_slots(opc, d, j, uk3);
922
+}
923
+
924
+static int32_t __attribute__((unused))
925
+encode_djuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk4)
926
+{
927
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
928
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
929
+ tcg_debug_assert(uk4 <= 0xf);
930
+ return encode_djk_slots(opc, d, j, uk4);
931
+}
932
+
933
static int32_t __attribute__((unused))
934
encode_djuk5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5)
935
{
936
@@ -XXX,XX +XXX,XX @@ encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
937
return encode_dj_slots(opc, d, sj20 & 0xfffff);
938
}
939
940
+static int32_t __attribute__((unused))
941
+encode_dtj_insn(LoongArchInsn opc, TCGReg d, TCGReg tj)
942
+{
943
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
944
+ tcg_debug_assert(tj >= 0 && tj <= 0x3);
945
+ return encode_dj_slots(opc, d, tj);
946
+}
947
+
948
static int32_t __attribute__((unused))
949
encode_dvjuk1_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk1)
950
{
951
@@ -XXX,XX +XXX,XX @@ encode_dvjuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk4)
952
return encode_djk_slots(opc, d, vj & 0x1f, uk4);
953
}
954
955
+static int32_t __attribute__((unused))
956
+encode_dxjuk2_insn(LoongArchInsn opc, TCGReg d, TCGReg xj, uint32_t uk2)
957
+{
958
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
959
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
960
+ tcg_debug_assert(uk2 <= 0x3);
961
+ return encode_djk_slots(opc, d, xj & 0x1f, uk2);
962
+}
963
+
964
+static int32_t __attribute__((unused))
965
+encode_dxjuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg xj, uint32_t uk3)
966
+{
967
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
968
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
969
+ tcg_debug_assert(uk3 <= 0x7);
970
+ return encode_djk_slots(opc, d, xj & 0x1f, uk3);
971
+}
972
+
973
+static int32_t __attribute__((unused))
974
+encode_fdfj_insn(LoongArchInsn opc, TCGReg fd, TCGReg fj)
975
+{
976
+ tcg_debug_assert(fd >= 0x20 && fd <= 0x3f);
977
+ tcg_debug_assert(fj >= 0x20 && fj <= 0x3f);
978
+ return encode_dj_slots(opc, fd & 0x1f, fj & 0x1f);
979
+}
980
+
981
+static int32_t __attribute__((unused))
982
+encode_fdj_insn(LoongArchInsn opc, TCGReg fd, TCGReg j)
983
+{
984
+ tcg_debug_assert(fd >= 0x20 && fd <= 0x3f);
985
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
986
+ return encode_dj_slots(opc, fd & 0x1f, j);
987
+}
988
+
989
+static int32_t __attribute__((unused))
990
+encode_fdjk_insn(LoongArchInsn opc, TCGReg fd, TCGReg j, TCGReg k)
991
+{
992
+ tcg_debug_assert(fd >= 0x20 && fd <= 0x3f);
993
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
994
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
995
+ return encode_djk_slots(opc, fd & 0x1f, j, k);
996
+}
997
+
998
+static int32_t __attribute__((unused))
999
+encode_fdjsk12_insn(LoongArchInsn opc, TCGReg fd, TCGReg j, int32_t sk12)
1000
+{
1001
+ tcg_debug_assert(fd >= 0x20 && fd <= 0x3f);
1002
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1003
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
1004
+ return encode_djk_slots(opc, fd & 0x1f, j, sk12 & 0xfff);
1005
+}
1006
+
1007
static int32_t __attribute__((unused))
1008
encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
1009
{
1010
@@ -XXX,XX +XXX,XX @@ encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
1011
return encode_dk_slots(opc, (sd10k16 >> 16) & 0x3ff, sd10k16 & 0xffff);
1012
}
1013
1014
+static int32_t __attribute__((unused))
1015
+encode_sd5k16_insn(LoongArchInsn opc, int32_t sd5k16)
1016
+{
1017
+ tcg_debug_assert(sd5k16 >= -0x100000 && sd5k16 <= 0xfffff);
1018
+ return encode_dk_slots(opc, (sd5k16 >> 16) & 0x1f, sd5k16 & 0xffff);
1019
+}
1020
+
1021
+static int32_t __attribute__((unused))
1022
+encode_tdj_insn(LoongArchInsn opc, TCGReg td, TCGReg j)
1023
+{
1024
+ tcg_debug_assert(td >= 0 && td <= 0x3);
1025
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1026
+ return encode_dj_slots(opc, td, j);
1027
+}
1028
+
1029
static int32_t __attribute__((unused))
1030
encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
1031
{
1032
@@ -XXX,XX +XXX,XX @@ encode_vdvjuk6_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk6)
1033
return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk6);
1034
}
1035
1036
-static int32_t __attribute__((unused))
1037
-encode_vdvjuk7_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk7)
1038
-{
1039
- tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
1040
- tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
1041
- tcg_debug_assert(uk7 <= 0x7f);
1042
- return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk7);
1043
-}
1044
-
1045
static int32_t __attribute__((unused))
1046
encode_vdvjuk8_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk8)
1047
{
1048
@@ -XXX,XX +XXX,XX @@ encode_vdvjvkva_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk,
1049
return encode_djka_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f, va & 0x1f);
1050
}
1051
1052
+static int32_t __attribute__((unused))
1053
+encode_xdj_insn(LoongArchInsn opc, TCGReg xd, TCGReg j)
1054
+{
1055
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1056
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1057
+ return encode_dj_slots(opc, xd & 0x1f, j);
1058
+}
1059
+
1060
+static int32_t __attribute__((unused))
1061
+encode_xdjk_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, TCGReg k)
1062
+{
1063
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1064
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1065
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
1066
+ return encode_djk_slots(opc, xd & 0x1f, j, k);
1067
+}
1068
+
1069
+static int32_t __attribute__((unused))
1070
+encode_xdjsk10_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk10)
1071
+{
1072
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1073
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1074
+ tcg_debug_assert(sk10 >= -0x200 && sk10 <= 0x1ff);
1075
+ return encode_djk_slots(opc, xd & 0x1f, j, sk10 & 0x3ff);
1076
+}
1077
+
1078
+static int32_t __attribute__((unused))
1079
+encode_xdjsk11_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk11)
1080
+{
1081
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1082
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1083
+ tcg_debug_assert(sk11 >= -0x400 && sk11 <= 0x3ff);
1084
+ return encode_djk_slots(opc, xd & 0x1f, j, sk11 & 0x7ff);
1085
+}
1086
+
1087
+static int32_t __attribute__((unused))
1088
+encode_xdjsk12_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk12)
1089
+{
1090
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1091
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1092
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
1093
+ return encode_djk_slots(opc, xd & 0x1f, j, sk12 & 0xfff);
1094
+}
1095
+
1096
+static int32_t __attribute__((unused))
1097
+encode_xdjsk8un2_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk8,
1098
+ uint32_t un2)
1099
+{
1100
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1101
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1102
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
1103
+ tcg_debug_assert(un2 <= 0x3);
1104
+ return encode_djkn_slots(opc, xd & 0x1f, j, sk8 & 0xff, un2);
1105
+}
1106
+
1107
+static int32_t __attribute__((unused))
1108
+encode_xdjsk8un3_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk8,
1109
+ uint32_t un3)
1110
+{
1111
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1112
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1113
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
1114
+ tcg_debug_assert(un3 <= 0x7);
1115
+ return encode_djkn_slots(opc, xd & 0x1f, j, sk8 & 0xff, un3);
1116
+}
1117
+
1118
+static int32_t __attribute__((unused))
1119
+encode_xdjsk8un4_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk8,
1120
+ uint32_t un4)
1121
+{
1122
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1123
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1124
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
1125
+ tcg_debug_assert(un4 <= 0xf);
1126
+ return encode_djkn_slots(opc, xd & 0x1f, j, sk8 & 0xff, un4);
1127
+}
1128
+
1129
+static int32_t __attribute__((unused))
1130
+encode_xdjsk8un5_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk8,
1131
+ uint32_t un5)
1132
+{
1133
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1134
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1135
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
1136
+ tcg_debug_assert(un5 <= 0x1f);
1137
+ return encode_djkn_slots(opc, xd & 0x1f, j, sk8 & 0xff, un5);
1138
+}
1139
+
1140
+static int32_t __attribute__((unused))
1141
+encode_xdjsk9_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, int32_t sk9)
1142
+{
1143
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1144
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1145
+ tcg_debug_assert(sk9 >= -0x100 && sk9 <= 0xff);
1146
+ return encode_djk_slots(opc, xd & 0x1f, j, sk9 & 0x1ff);
1147
+}
1148
+
1149
+static int32_t __attribute__((unused))
1150
+encode_xdjuk2_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, uint32_t uk2)
1151
+{
1152
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1153
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1154
+ tcg_debug_assert(uk2 <= 0x3);
1155
+ return encode_djk_slots(opc, xd & 0x1f, j, uk2);
1156
+}
1157
+
1158
+static int32_t __attribute__((unused))
1159
+encode_xdjuk3_insn(LoongArchInsn opc, TCGReg xd, TCGReg j, uint32_t uk3)
1160
+{
1161
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1162
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
1163
+ tcg_debug_assert(uk3 <= 0x7);
1164
+ return encode_djk_slots(opc, xd & 0x1f, j, uk3);
1165
+}
1166
+
1167
+static int32_t __attribute__((unused))
1168
+encode_xdsj13_insn(LoongArchInsn opc, TCGReg xd, int32_t sj13)
1169
+{
1170
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1171
+ tcg_debug_assert(sj13 >= -0x1000 && sj13 <= 0xfff);
1172
+ return encode_dj_slots(opc, xd & 0x1f, sj13 & 0x1fff);
1173
+}
1174
+
1175
+static int32_t __attribute__((unused))
1176
+encode_xdxj_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj)
1177
+{
1178
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1179
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1180
+ return encode_dj_slots(opc, xd & 0x1f, xj & 0x1f);
1181
+}
1182
+
1183
+static int32_t __attribute__((unused))
1184
+encode_xdxjk_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, TCGReg k)
1185
+{
1186
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1187
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1188
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
1189
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, k);
1190
+}
1191
+
1192
+static int32_t __attribute__((unused))
1193
+encode_xdxjsk5_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, int32_t sk5)
1194
+{
1195
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1196
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1197
+ tcg_debug_assert(sk5 >= -0x10 && sk5 <= 0xf);
1198
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, sk5 & 0x1f);
1199
+}
1200
+
1201
+static int32_t __attribute__((unused))
1202
+encode_xdxjuk1_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk1)
1203
+{
1204
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1205
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1206
+ tcg_debug_assert(uk1 <= 0x1);
1207
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk1);
1208
+}
1209
+
1210
+static int32_t __attribute__((unused))
1211
+encode_xdxjuk2_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk2)
1212
+{
1213
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1214
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1215
+ tcg_debug_assert(uk2 <= 0x3);
1216
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk2);
1217
+}
1218
+
1219
+static int32_t __attribute__((unused))
1220
+encode_xdxjuk3_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk3)
1221
+{
1222
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1223
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1224
+ tcg_debug_assert(uk3 <= 0x7);
1225
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk3);
1226
+}
1227
+
1228
+static int32_t __attribute__((unused))
1229
+encode_xdxjuk4_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk4)
1230
+{
1231
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1232
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1233
+ tcg_debug_assert(uk4 <= 0xf);
1234
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk4);
1235
+}
1236
+
1237
+static int32_t __attribute__((unused))
1238
+encode_xdxjuk5_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk5)
1239
+{
1240
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1241
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1242
+ tcg_debug_assert(uk5 <= 0x1f);
1243
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk5);
1244
+}
1245
+
1246
+static int32_t __attribute__((unused))
1247
+encode_xdxjuk6_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk6)
1248
+{
1249
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1250
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1251
+ tcg_debug_assert(uk6 <= 0x3f);
1252
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk6);
1253
+}
1254
+
1255
+static int32_t __attribute__((unused))
1256
+encode_xdxjuk8_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, uint32_t uk8)
1257
+{
1258
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1259
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1260
+ tcg_debug_assert(uk8 <= 0xff);
1261
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, uk8);
1262
+}
1263
+
1264
+static int32_t __attribute__((unused))
1265
+encode_xdxjxk_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, TCGReg xk)
1266
+{
1267
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1268
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1269
+ tcg_debug_assert(xk >= 0x20 && xk <= 0x3f);
1270
+ return encode_djk_slots(opc, xd & 0x1f, xj & 0x1f, xk & 0x1f);
1271
+}
1272
+
1273
+static int32_t __attribute__((unused))
1274
+encode_xdxjxkxa_insn(LoongArchInsn opc, TCGReg xd, TCGReg xj, TCGReg xk,
1275
+ TCGReg xa)
1276
+{
1277
+ tcg_debug_assert(xd >= 0x20 && xd <= 0x3f);
1278
+ tcg_debug_assert(xj >= 0x20 && xj <= 0x3f);
1279
+ tcg_debug_assert(xk >= 0x20 && xk <= 0x3f);
1280
+ tcg_debug_assert(xa >= 0x20 && xa <= 0x3f);
1281
+ return encode_djka_slots(opc, xd & 0x1f, xj & 0x1f, xk & 0x1f, xa & 0x1f);
1282
+}
1283
+
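
Worth noting for review: every xd/xj/xk assert above checks the 0x20..0x3f range and then masks with 0x1f, i.e. the vector registers are assumed to sit at a +32 bias in the TCGReg numbering and only the low five bits reach the instruction word. A sketch of that convention:

/* Illustrative only: map a biased vector TCGReg to its 5-bit slot. */
static inline unsigned example_vec_slot(unsigned tcg_reg)
{
    /* assumes V0..V31 occupy TCGReg values 0x20..0x3f */
    return tcg_reg & 0x1f;
}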
1284
+/* Emits the `movgr2scr td, j` instruction. */
1285
+static void __attribute__((unused))
1286
+tcg_out_opc_movgr2scr(TCGContext *s, TCGReg td, TCGReg j)
1287
+{
1288
+ tcg_out32(s, encode_tdj_insn(OPC_MOVGR2SCR, td, j));
1289
+}
1290
+
1291
+/* Emits the `movscr2gr d, tj` instruction. */
1292
+static void __attribute__((unused))
1293
+tcg_out_opc_movscr2gr(TCGContext *s, TCGReg d, TCGReg tj)
1294
+{
1295
+ tcg_out32(s, encode_dtj_insn(OPC_MOVSCR2GR, d, tj));
1296
+}
1297
+
1298
/* Emits the `clz.w d, j` instruction. */
1299
static void __attribute__((unused))
1300
tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
1301
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_sra_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1302
tcg_out32(s, encode_djk_insn(OPC_SRA_D, d, j, k));
1303
}
1304
1305
+/* Emits the `rotr.b d, j, k` instruction. */
1306
+static void __attribute__((unused))
1307
+tcg_out_opc_rotr_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1308
+{
1309
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_B, d, j, k));
1310
+}
1311
+
1312
+/* Emits the `rotr.h d, j, k` instruction. */
1313
+static void __attribute__((unused))
1314
+tcg_out_opc_rotr_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1315
+{
1316
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_H, d, j, k));
1317
+}
1318
+
1319
/* Emits the `rotr.w d, j, k` instruction. */
1320
static void __attribute__((unused))
1321
tcg_out_opc_rotr_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1322
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_srai_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
1323
tcg_out32(s, encode_djuk6_insn(OPC_SRAI_D, d, j, uk6));
1324
}
1325
1326
+/* Emits the `rotri.b d, j, uk3` instruction. */
1327
+static void __attribute__((unused))
1328
+tcg_out_opc_rotri_b(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk3)
1329
+{
1330
+ tcg_out32(s, encode_djuk3_insn(OPC_ROTRI_B, d, j, uk3));
1331
+}
1332
+
1333
+/* Emits the `rotri.h d, j, uk4` instruction. */
1334
+static void __attribute__((unused))
1335
+tcg_out_opc_rotri_h(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk4)
1336
+{
1337
+ tcg_out32(s, encode_djuk4_insn(OPC_ROTRI_H, d, j, uk4));
1338
+}
1339
+
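
Side note on the new rotri.b/rotri.h emitters: the immediate width tracks the element size (a 3-bit count for byte lanes, 4 bits for halfword lanes). What one byte lane computes, as a sketch:

/* Illustrative only: the per-lane rotate-right that rotri.b performs. */
static inline unsigned char example_rotr8(unsigned char v, unsigned uk3)
{
    uk3 &= 7;   /* the count lives in a 3-bit field */
    return (unsigned char)((v >> uk3) | (v << ((8 - uk3) & 7)));
}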
1340
/* Emits the `rotri.w d, j, uk5` instruction. */
1341
static void __attribute__((unused))
1342
tcg_out_opc_rotri_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
1343
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_bstrpick_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6,
1344
tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRPICK_D, d, j, uk6, um6));
1345
}
1346
1347
+/* Emits the `fmov.d fd, fj` instruction. */
1348
+static void __attribute__((unused))
1349
+tcg_out_opc_fmov_d(TCGContext *s, TCGReg fd, TCGReg fj)
1350
+{
1351
+ tcg_out32(s, encode_fdfj_insn(OPC_FMOV_D, fd, fj));
1352
+}
1353
+
1354
+/* Emits the `movgr2fr.d fd, j` instruction. */
1355
+static void __attribute__((unused))
1356
+tcg_out_opc_movgr2fr_d(TCGContext *s, TCGReg fd, TCGReg j)
1357
+{
1358
+ tcg_out32(s, encode_fdj_insn(OPC_MOVGR2FR_D, fd, j));
1359
+}
1360
+
1361
+/* Emits the `movfr2gr.d d, fj` instruction. */
1362
+static void __attribute__((unused))
1363
+tcg_out_opc_movfr2gr_d(TCGContext *s, TCGReg d, TCGReg fj)
1364
+{
1365
+ tcg_out32(s, encode_dfj_insn(OPC_MOVFR2GR_D, d, fj));
1366
+}
1367
+
1368
/* Emits the `slti d, j, sk12` instruction. */
1369
static void __attribute__((unused))
1370
tcg_out_opc_slti(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
1371
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
1372
tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
1373
}
1374
1375
-/* Emits the `vfmadd.s vd, vj, vk, va` instruction. */
1376
-static void __attribute__((unused))
1377
-tcg_out_opc_vfmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1378
-{
1379
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_S, vd, vj, vk, va));
1380
-}
1381
-
1382
-/* Emits the `vfmadd.d vd, vj, vk, va` instruction. */
1383
-static void __attribute__((unused))
1384
-tcg_out_opc_vfmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1385
-{
1386
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_D, vd, vj, vk, va));
1387
-}
1388
-
1389
-/* Emits the `vfmsub.s vd, vj, vk, va` instruction. */
1390
-static void __attribute__((unused))
1391
-tcg_out_opc_vfmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1392
-{
1393
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_S, vd, vj, vk, va));
1394
-}
1395
-
1396
-/* Emits the `vfmsub.d vd, vj, vk, va` instruction. */
1397
-static void __attribute__((unused))
1398
-tcg_out_opc_vfmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1399
-{
1400
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_D, vd, vj, vk, va));
1401
-}
1402
-
1403
-/* Emits the `vfnmadd.s vd, vj, vk, va` instruction. */
1404
-static void __attribute__((unused))
1405
-tcg_out_opc_vfnmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1406
-{
1407
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_S, vd, vj, vk, va));
1408
-}
1409
-
1410
-/* Emits the `vfnmadd.d vd, vj, vk, va` instruction. */
1411
-static void __attribute__((unused))
1412
-tcg_out_opc_vfnmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1413
-{
1414
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_D, vd, vj, vk, va));
1415
-}
1416
-
1417
-/* Emits the `vfnmsub.s vd, vj, vk, va` instruction. */
1418
-static void __attribute__((unused))
1419
-tcg_out_opc_vfnmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1420
-{
1421
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_S, vd, vj, vk, va));
1422
-}
1423
-
1424
-/* Emits the `vfnmsub.d vd, vj, vk, va` instruction. */
1425
-static void __attribute__((unused))
1426
-tcg_out_opc_vfnmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1427
-{
1428
- tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_D, vd, vj, vk, va));
1429
-}
1430
-
1431
-/* Emits the `vfcmp.caf.s vd, vj, vk` instruction. */
1432
-static void __attribute__((unused))
1433
-tcg_out_opc_vfcmp_caf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1434
-{
1435
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_S, vd, vj, vk));
1436
-}
1437
-
1438
-/* Emits the `vfcmp.saf.s vd, vj, vk` instruction. */
1439
-static void __attribute__((unused))
1440
-tcg_out_opc_vfcmp_saf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1441
-{
1442
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_S, vd, vj, vk));
1443
-}
1444
-
1445
-/* Emits the `vfcmp.clt.s vd, vj, vk` instruction. */
1446
-static void __attribute__((unused))
1447
-tcg_out_opc_vfcmp_clt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1448
-{
1449
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_S, vd, vj, vk));
1450
-}
1451
-
1452
-/* Emits the `vfcmp.slt.s vd, vj, vk` instruction. */
1453
-static void __attribute__((unused))
1454
-tcg_out_opc_vfcmp_slt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1455
-{
1456
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_S, vd, vj, vk));
1457
-}
1458
-
1459
-/* Emits the `vfcmp.ceq.s vd, vj, vk` instruction. */
1460
-static void __attribute__((unused))
1461
-tcg_out_opc_vfcmp_ceq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1462
-{
1463
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_S, vd, vj, vk));
1464
-}
1465
-
1466
-/* Emits the `vfcmp.seq.s vd, vj, vk` instruction. */
1467
-static void __attribute__((unused))
1468
-tcg_out_opc_vfcmp_seq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1469
-{
1470
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_S, vd, vj, vk));
1471
-}
1472
-
1473
-/* Emits the `vfcmp.cle.s vd, vj, vk` instruction. */
1474
-static void __attribute__((unused))
1475
-tcg_out_opc_vfcmp_cle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1476
-{
1477
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_S, vd, vj, vk));
1478
-}
1479
-
1480
-/* Emits the `vfcmp.sle.s vd, vj, vk` instruction. */
1481
-static void __attribute__((unused))
1482
-tcg_out_opc_vfcmp_sle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1483
-{
1484
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_S, vd, vj, vk));
1485
-}
1486
-
1487
-/* Emits the `vfcmp.cun.s vd, vj, vk` instruction. */
1488
-static void __attribute__((unused))
1489
-tcg_out_opc_vfcmp_cun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1490
-{
1491
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_S, vd, vj, vk));
1492
-}
1493
-
1494
-/* Emits the `vfcmp.sun.s vd, vj, vk` instruction. */
1495
-static void __attribute__((unused))
1496
-tcg_out_opc_vfcmp_sun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1497
-{
1498
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_S, vd, vj, vk));
1499
-}
1500
-
1501
-/* Emits the `vfcmp.cult.s vd, vj, vk` instruction. */
1502
-static void __attribute__((unused))
1503
-tcg_out_opc_vfcmp_cult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1504
-{
1505
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_S, vd, vj, vk));
1506
-}
1507
-
1508
-/* Emits the `vfcmp.sult.s vd, vj, vk` instruction. */
1509
-static void __attribute__((unused))
1510
-tcg_out_opc_vfcmp_sult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1511
-{
1512
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_S, vd, vj, vk));
1513
-}
1514
-
1515
-/* Emits the `vfcmp.cueq.s vd, vj, vk` instruction. */
1516
-static void __attribute__((unused))
1517
-tcg_out_opc_vfcmp_cueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1518
-{
1519
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_S, vd, vj, vk));
1520
-}
1521
-
1522
-/* Emits the `vfcmp.sueq.s vd, vj, vk` instruction. */
1523
-static void __attribute__((unused))
1524
-tcg_out_opc_vfcmp_sueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1525
-{
1526
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_S, vd, vj, vk));
1527
-}
1528
-
1529
-/* Emits the `vfcmp.cule.s vd, vj, vk` instruction. */
1530
-static void __attribute__((unused))
1531
-tcg_out_opc_vfcmp_cule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1532
-{
1533
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_S, vd, vj, vk));
1534
-}
1535
-
1536
-/* Emits the `vfcmp.sule.s vd, vj, vk` instruction. */
1537
-static void __attribute__((unused))
1538
-tcg_out_opc_vfcmp_sule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1539
-{
1540
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_S, vd, vj, vk));
1541
-}
1542
-
1543
-/* Emits the `vfcmp.cne.s vd, vj, vk` instruction. */
1544
-static void __attribute__((unused))
1545
-tcg_out_opc_vfcmp_cne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1546
-{
1547
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_S, vd, vj, vk));
1548
-}
1549
-
1550
-/* Emits the `vfcmp.sne.s vd, vj, vk` instruction. */
1551
-static void __attribute__((unused))
1552
-tcg_out_opc_vfcmp_sne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1553
-{
1554
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_S, vd, vj, vk));
1555
-}
1556
-
1557
-/* Emits the `vfcmp.cor.s vd, vj, vk` instruction. */
1558
-static void __attribute__((unused))
1559
-tcg_out_opc_vfcmp_cor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1560
-{
1561
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_S, vd, vj, vk));
1562
-}
1563
-
1564
-/* Emits the `vfcmp.sor.s vd, vj, vk` instruction. */
1565
-static void __attribute__((unused))
1566
-tcg_out_opc_vfcmp_sor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1567
-{
1568
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_S, vd, vj, vk));
1569
-}
1570
-
1571
-/* Emits the `vfcmp.cune.s vd, vj, vk` instruction. */
1572
-static void __attribute__((unused))
1573
-tcg_out_opc_vfcmp_cune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1574
-{
1575
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_S, vd, vj, vk));
1576
-}
1577
-
1578
-/* Emits the `vfcmp.sune.s vd, vj, vk` instruction. */
1579
-static void __attribute__((unused))
1580
-tcg_out_opc_vfcmp_sune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1581
-{
1582
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_S, vd, vj, vk));
1583
-}
1584
-
1585
-/* Emits the `vfcmp.caf.d vd, vj, vk` instruction. */
1586
-static void __attribute__((unused))
1587
-tcg_out_opc_vfcmp_caf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1588
-{
1589
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_D, vd, vj, vk));
1590
-}
1591
-
1592
-/* Emits the `vfcmp.saf.d vd, vj, vk` instruction. */
1593
-static void __attribute__((unused))
1594
-tcg_out_opc_vfcmp_saf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1595
-{
1596
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_D, vd, vj, vk));
1597
-}
1598
-
1599
-/* Emits the `vfcmp.clt.d vd, vj, vk` instruction. */
1600
-static void __attribute__((unused))
1601
-tcg_out_opc_vfcmp_clt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1602
-{
1603
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_D, vd, vj, vk));
1604
-}
1605
-
1606
-/* Emits the `vfcmp.slt.d vd, vj, vk` instruction. */
1607
-static void __attribute__((unused))
1608
-tcg_out_opc_vfcmp_slt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1609
-{
1610
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_D, vd, vj, vk));
1611
-}
1612
-
1613
-/* Emits the `vfcmp.ceq.d vd, vj, vk` instruction. */
1614
-static void __attribute__((unused))
1615
-tcg_out_opc_vfcmp_ceq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1616
-{
1617
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_D, vd, vj, vk));
1618
-}
1619
-
1620
-/* Emits the `vfcmp.seq.d vd, vj, vk` instruction. */
1621
-static void __attribute__((unused))
1622
-tcg_out_opc_vfcmp_seq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1623
-{
1624
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_D, vd, vj, vk));
1625
-}
1626
-
1627
-/* Emits the `vfcmp.cle.d vd, vj, vk` instruction. */
1628
-static void __attribute__((unused))
1629
-tcg_out_opc_vfcmp_cle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1630
-{
1631
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_D, vd, vj, vk));
1632
-}
1633
-
1634
-/* Emits the `vfcmp.sle.d vd, vj, vk` instruction. */
1635
-static void __attribute__((unused))
1636
-tcg_out_opc_vfcmp_sle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1637
-{
1638
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_D, vd, vj, vk));
1639
-}
1640
-
1641
-/* Emits the `vfcmp.cun.d vd, vj, vk` instruction. */
1642
-static void __attribute__((unused))
1643
-tcg_out_opc_vfcmp_cun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1644
-{
1645
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_D, vd, vj, vk));
1646
-}
1647
-
1648
-/* Emits the `vfcmp.sun.d vd, vj, vk` instruction. */
1649
-static void __attribute__((unused))
1650
-tcg_out_opc_vfcmp_sun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1651
-{
1652
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_D, vd, vj, vk));
1653
-}
1654
-
1655
-/* Emits the `vfcmp.cult.d vd, vj, vk` instruction. */
1656
-static void __attribute__((unused))
1657
-tcg_out_opc_vfcmp_cult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1658
-{
1659
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_D, vd, vj, vk));
1660
-}
1661
-
1662
-/* Emits the `vfcmp.sult.d vd, vj, vk` instruction. */
1663
-static void __attribute__((unused))
1664
-tcg_out_opc_vfcmp_sult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1665
-{
1666
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_D, vd, vj, vk));
1667
-}
1668
-
1669
-/* Emits the `vfcmp.cueq.d vd, vj, vk` instruction. */
1670
-static void __attribute__((unused))
1671
-tcg_out_opc_vfcmp_cueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1672
-{
1673
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_D, vd, vj, vk));
1674
-}
1675
-
1676
-/* Emits the `vfcmp.sueq.d vd, vj, vk` instruction. */
1677
-static void __attribute__((unused))
1678
-tcg_out_opc_vfcmp_sueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1679
-{
1680
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_D, vd, vj, vk));
1681
-}
1682
-
1683
-/* Emits the `vfcmp.cule.d vd, vj, vk` instruction. */
1684
-static void __attribute__((unused))
1685
-tcg_out_opc_vfcmp_cule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1686
-{
1687
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_D, vd, vj, vk));
1688
-}
1689
-
1690
-/* Emits the `vfcmp.sule.d vd, vj, vk` instruction. */
1691
-static void __attribute__((unused))
1692
-tcg_out_opc_vfcmp_sule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1693
-{
1694
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_D, vd, vj, vk));
1695
-}
1696
-
1697
-/* Emits the `vfcmp.cne.d vd, vj, vk` instruction. */
1698
-static void __attribute__((unused))
1699
-tcg_out_opc_vfcmp_cne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1700
-{
1701
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_D, vd, vj, vk));
1702
-}
1703
-
1704
-/* Emits the `vfcmp.sne.d vd, vj, vk` instruction. */
1705
-static void __attribute__((unused))
1706
-tcg_out_opc_vfcmp_sne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1707
-{
1708
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_D, vd, vj, vk));
1709
-}
1710
-
1711
-/* Emits the `vfcmp.cor.d vd, vj, vk` instruction. */
1712
-static void __attribute__((unused))
1713
-tcg_out_opc_vfcmp_cor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1714
-{
1715
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_D, vd, vj, vk));
1716
-}
1717
-
1718
-/* Emits the `vfcmp.sor.d vd, vj, vk` instruction. */
1719
-static void __attribute__((unused))
1720
-tcg_out_opc_vfcmp_sor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1721
-{
1722
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_D, vd, vj, vk));
1723
-}
1724
-
1725
-/* Emits the `vfcmp.cune.d vd, vj, vk` instruction. */
1726
-static void __attribute__((unused))
1727
-tcg_out_opc_vfcmp_cune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1728
-{
1729
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_D, vd, vj, vk));
1730
-}
1731
-
1732
-/* Emits the `vfcmp.sune.d vd, vj, vk` instruction. */
1733
-static void __attribute__((unused))
1734
-tcg_out_opc_vfcmp_sune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1735
-{
1736
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_D, vd, vj, vk));
1737
-}
1738
-
1739
/* Emits the `vbitsel.v vd, vj, vk, va` instruction. */
1740
static void __attribute__((unused))
1741
tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1742
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1743
tcg_out32(s, encode_vdvjvkva_insn(OPC_VBITSEL_V, vd, vj, vk, va));
1744
}
1745
1746
+/* Emits the `xvbitsel.v xd, xj, xk, xa` instruction. */
1747
+static void __attribute__((unused))
1748
+tcg_out_opc_xvbitsel_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk,
1749
+ TCGReg xa)
1750
+{
1751
+ tcg_out32(s, encode_xdxjxkxa_insn(OPC_XVBITSEL_V, xd, xj, xk, xa));
1752
+}
1753
+
1754
/* Emits the `vshuf.b vd, vj, vk, va` instruction. */
1755
static void __attribute__((unused))
1756
tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1757
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1758
tcg_out32(s, encode_vdvjvkva_insn(OPC_VSHUF_B, vd, vj, vk, va));
1759
}
1760
1761
+/* Emits the `xvshuf.b xd, xj, xk, xa` instruction. */
1762
+static void __attribute__((unused))
1763
+tcg_out_opc_xvshuf_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk, TCGReg xa)
1764
+{
1765
+ tcg_out32(s, encode_xdxjxkxa_insn(OPC_XVSHUF_B, xd, xj, xk, xa));
1766
+}
1767
+
1768
/* Emits the `addu16i.d d, j, sk16` instruction. */
1769
static void __attribute__((unused))
1770
tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1771
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
1772
tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
1773
}
1774
1775
+/* Emits the `fld.s fd, j, sk12` instruction. */
1776
+static void __attribute__((unused))
1777
+tcg_out_opc_fld_s(TCGContext *s, TCGReg fd, TCGReg j, int32_t sk12)
1778
+{
1779
+ tcg_out32(s, encode_fdjsk12_insn(OPC_FLD_S, fd, j, sk12));
1780
+}
1781
+
1782
+/* Emits the `fst.s fd, j, sk12` instruction. */
1783
+static void __attribute__((unused))
1784
+tcg_out_opc_fst_s(TCGContext *s, TCGReg fd, TCGReg j, int32_t sk12)
1785
+{
1786
+ tcg_out32(s, encode_fdjsk12_insn(OPC_FST_S, fd, j, sk12));
1787
+}
1788
+
1789
+/* Emits the `fld.d fd, j, sk12` instruction. */
1790
+static void __attribute__((unused))
1791
+tcg_out_opc_fld_d(TCGContext *s, TCGReg fd, TCGReg j, int32_t sk12)
1792
+{
1793
+ tcg_out32(s, encode_fdjsk12_insn(OPC_FLD_D, fd, j, sk12));
1794
+}
1795
+
1796
+/* Emits the `fst.d fd, j, sk12` instruction. */
1797
+static void __attribute__((unused))
1798
+tcg_out_opc_fst_d(TCGContext *s, TCGReg fd, TCGReg j, int32_t sk12)
1799
+{
1800
+ tcg_out32(s, encode_fdjsk12_insn(OPC_FST_D, fd, j, sk12));
1801
+}
1802
+
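
A quick sketch of the signed-immediate idiom these load/store emitters rely on: a 12-bit signed field accepts [-0x800, 0x7ff] and is stored as its low 12 bits in two's complement, which is what the `sk12 & 0xfff` in encode_fdjsk12_insn does:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: range-check and field-mask for a signed 12-bit slot. */
static inline int32_t example_field_sk12(int32_t sk12)
{
    assert(sk12 >= -0x800 && sk12 <= 0x7ff);
    return sk12 & 0xfff;   /* low 12 bits carry the two's-complement value */
}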
1803
/* Emits the `vld vd, j, sk12` instruction. */
1804
static void __attribute__((unused))
1805
tcg_out_opc_vld(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1806
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vst(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1807
tcg_out32(s, encode_vdjsk12_insn(OPC_VST, vd, j, sk12));
1808
}
1809
1810
+/* Emits the `xvld xd, j, sk12` instruction. */
1811
+static void __attribute__((unused))
1812
+tcg_out_opc_xvld(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk12)
1813
+{
1814
+ tcg_out32(s, encode_xdjsk12_insn(OPC_XVLD, xd, j, sk12));
1815
+}
1816
+
1817
+/* Emits the `xvst xd, j, sk12` instruction. */
1818
+static void __attribute__((unused))
1819
+tcg_out_opc_xvst(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk12)
1820
+{
1821
+ tcg_out32(s, encode_xdjsk12_insn(OPC_XVST, xd, j, sk12));
1822
+}
1823
+
1824
/* Emits the `vldrepl.d vd, j, sk9` instruction. */
1825
static void __attribute__((unused))
1826
tcg_out_opc_vldrepl_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk9)
1827
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vstelm_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1828
tcg_out32(s, encode_vdjsk8un4_insn(OPC_VSTELM_B, vd, j, sk8, un4));
1829
}
1830
1831
+/* Emits the `xvldrepl.d xd, j, sk9` instruction. */
1832
+static void __attribute__((unused))
1833
+tcg_out_opc_xvldrepl_d(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk9)
1834
+{
1835
+ tcg_out32(s, encode_xdjsk9_insn(OPC_XVLDREPL_D, xd, j, sk9));
1836
+}
1837
+
1838
+/* Emits the `xvldrepl.w xd, j, sk10` instruction. */
1839
+static void __attribute__((unused))
1840
+tcg_out_opc_xvldrepl_w(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk10)
1841
+{
1842
+ tcg_out32(s, encode_xdjsk10_insn(OPC_XVLDREPL_W, xd, j, sk10));
1843
+}
1844
+
1845
+/* Emits the `xvldrepl.h xd, j, sk11` instruction. */
1846
+static void __attribute__((unused))
1847
+tcg_out_opc_xvldrepl_h(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk11)
1848
+{
1849
+ tcg_out32(s, encode_xdjsk11_insn(OPC_XVLDREPL_H, xd, j, sk11));
1850
+}
1851
+
1852
+/* Emits the `xvldrepl.b xd, j, sk12` instruction. */
1853
+static void __attribute__((unused))
1854
+tcg_out_opc_xvldrepl_b(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk12)
1855
+{
1856
+ tcg_out32(s, encode_xdjsk12_insn(OPC_XVLDREPL_B, xd, j, sk12));
1857
+}
1858
+
1859
+/* Emits the `xvstelm.d xd, j, sk8, un2` instruction. */
1860
+static void __attribute__((unused))
1861
+tcg_out_opc_xvstelm_d(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk8,
1862
+ uint32_t un2)
1863
+{
1864
+ tcg_out32(s, encode_xdjsk8un2_insn(OPC_XVSTELM_D, xd, j, sk8, un2));
1865
+}
1866
+
1867
+/* Emits the `xvstelm.w xd, j, sk8, un3` instruction. */
1868
+static void __attribute__((unused))
1869
+tcg_out_opc_xvstelm_w(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk8,
1870
+ uint32_t un3)
1871
+{
1872
+ tcg_out32(s, encode_xdjsk8un3_insn(OPC_XVSTELM_W, xd, j, sk8, un3));
1873
+}
1874
+
1875
+/* Emits the `xvstelm.h xd, j, sk8, un4` instruction. */
1876
+static void __attribute__((unused))
1877
+tcg_out_opc_xvstelm_h(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk8,
1878
+ uint32_t un4)
1879
+{
1880
+ tcg_out32(s, encode_xdjsk8un4_insn(OPC_XVSTELM_H, xd, j, sk8, un4));
1881
+}
1882
+
1883
+/* Emits the `xvstelm.b xd, j, sk8, un5` instruction. */
1884
+static void __attribute__((unused))
1885
+tcg_out_opc_xvstelm_b(TCGContext *s, TCGReg xd, TCGReg j, int32_t sk8,
1886
+ uint32_t un5)
1887
+{
1888
+ tcg_out32(s, encode_xdjsk8un5_insn(OPC_XVSTELM_B, xd, j, sk8, un5));
1889
+}
1890
+
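
One assumption worth flagging on the xvstelm.* emitters: the element stores appear to scale the signed offset by the element size, so for xvstelm.d the effective address should be base + sk8 * 8, with un2 picking one of the four 64-bit lanes. A sketch of that address computation (illustrative, not taken from the patch):

#include <stdint.h>

/* Illustrative only: effective address assumed for xvstelm.d. */
static inline intptr_t example_xvstelm_d_addr(intptr_t base, int32_t sk8)
{
    return base + (intptr_t)sk8 * 8;   /* offset counted in 8-byte elements */
}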
1891
/* Emits the `ldx.b d, j, k` instruction. */
1892
static void __attribute__((unused))
1893
tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1894
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1895
tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
1896
}
1897
1898
+/* Emits the `fldx.s fd, j, k` instruction. */
1899
+static void __attribute__((unused))
1900
+tcg_out_opc_fldx_s(TCGContext *s, TCGReg fd, TCGReg j, TCGReg k)
1901
+{
1902
+ tcg_out32(s, encode_fdjk_insn(OPC_FLDX_S, fd, j, k));
1903
+}
1904
+
1905
+/* Emits the `fldx.d fd, j, k` instruction. */
1906
+static void __attribute__((unused))
1907
+tcg_out_opc_fldx_d(TCGContext *s, TCGReg fd, TCGReg j, TCGReg k)
1908
+{
1909
+ tcg_out32(s, encode_fdjk_insn(OPC_FLDX_D, fd, j, k));
1910
+}
1911
+
1912
+/* Emits the `fstx.s fd, j, k` instruction. */
1913
+static void __attribute__((unused))
1914
+tcg_out_opc_fstx_s(TCGContext *s, TCGReg fd, TCGReg j, TCGReg k)
1915
+{
1916
+ tcg_out32(s, encode_fdjk_insn(OPC_FSTX_S, fd, j, k));
1917
+}
1918
+
1919
+/* Emits the `fstx.d fd, j, k` instruction. */
1920
+static void __attribute__((unused))
1921
+tcg_out_opc_fstx_d(TCGContext *s, TCGReg fd, TCGReg j, TCGReg k)
1922
+{
1923
+ tcg_out32(s, encode_fdjk_insn(OPC_FSTX_D, fd, j, k));
1924
+}
1925
+
1926
/* Emits the `vldx vd, j, k` instruction. */
1927
static void __attribute__((unused))
1928
tcg_out_opc_vldx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
1929
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vstx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
1930
tcg_out32(s, encode_vdjk_insn(OPC_VSTX, vd, j, k));
1931
}
1932
1933
+/* Emits the `xvldx xd, j, k` instruction. */
1934
+static void __attribute__((unused))
1935
+tcg_out_opc_xvldx(TCGContext *s, TCGReg xd, TCGReg j, TCGReg k)
1936
+{
1937
+ tcg_out32(s, encode_xdjk_insn(OPC_XVLDX, xd, j, k));
1938
+}
1939
+
1940
+/* Emits the `xvstx xd, j, k` instruction. */
1941
+static void __attribute__((unused))
1942
+tcg_out_opc_xvstx(TCGContext *s, TCGReg xd, TCGReg j, TCGReg k)
1943
+{
1944
+ tcg_out32(s, encode_xdjk_insn(OPC_XVSTX, xd, j, k));
1945
+}
1946
+
1947
/* Emits the `dbar ud15` instruction. */
1948
static void __attribute__((unused))
1949
tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
1950
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
1951
tcg_out32(s, encode_ud15_insn(OPC_DBAR, ud15));
1952
}
1953
1954
+/* Emits the `jiscr0 sd5k16` instruction. */
1955
+static void __attribute__((unused))
1956
+tcg_out_opc_jiscr0(TCGContext *s, int32_t sd5k16)
1957
+{
1958
+ tcg_out32(s, encode_sd5k16_insn(OPC_JISCR0, sd5k16));
1959
+}
1960
+
1961
+/* Emits the `jiscr1 sd5k16` instruction. */
1962
+static void __attribute__((unused))
1963
+tcg_out_opc_jiscr1(TCGContext *s, int32_t sd5k16)
1964
+{
1965
+ tcg_out32(s, encode_sd5k16_insn(OPC_JISCR1, sd5k16));
1966
+}
1967
+
1968
/* Emits the `jirl d, j, sk16` instruction. */
1969
static void __attribute__((unused))
1970
tcg_out_opc_jirl(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1971
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1972
tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_D, vd, vj, vk));
1973
}
1974
1975
-/* Emits the `vaddwev.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_B, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_H, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_W, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_B, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_H, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_W, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_B, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_H, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_W, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_B, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_H, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_W, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vsubwev.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vsubwod.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsubwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vaddwev.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU_D, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vaddwod.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU_D, vd, vj, vk));
-}
-
/* Emits the `vsadd.b vd, vj, vk` instruction. */
static void __attribute__((unused))
tcg_out_opc_vsadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vssub_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_DU, vd, vj, vk));
}

-/* Emits the `vhaddw.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_H_B, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_W_H, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_D_W, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_H_B, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_W_H, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_D_W, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.hu.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_HU_BU, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.wu.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_WU_HU, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.du.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_DU_WU, vd, vj, vk));
-}
-
-/* Emits the `vhaddw.qu.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhaddw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_QU_DU, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.hu.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_HU_BU, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.wu.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_WU_HU, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.du.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_DU_WU, vd, vj, vk));
-}
-
-/* Emits the `vhsubw.qu.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vhsubw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_QU_DU, vd, vj, vk));
-}
-
-/* Emits the `vadda.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vadda_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_B, vd, vj, vk));
-}
-
-/* Emits the `vadda.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vadda_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_H, vd, vj, vk));
-}
-
-/* Emits the `vadda.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vadda_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_W, vd, vj, vk));
-}
-
-/* Emits the `vadda.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vadda_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_D, vd, vj, vk));
-}
-
-/* Emits the `vabsd.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_B, vd, vj, vk));
-}
-
-/* Emits the `vabsd.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_H, vd, vj, vk));
-}
-
-/* Emits the `vabsd.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_W, vd, vj, vk));
-}
-
-/* Emits the `vabsd.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_D, vd, vj, vk));
-}
-
-/* Emits the `vabsd.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_BU, vd, vj, vk));
-}
-
-/* Emits the `vabsd.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_HU, vd, vj, vk));
-}
-
-/* Emits the `vabsd.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_WU, vd, vj, vk));
-}
-
-/* Emits the `vabsd.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vabsd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_DU, vd, vj, vk));
-}
-
-/* Emits the `vavg.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_B, vd, vj, vk));
-}
-
-/* Emits the `vavg.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_H, vd, vj, vk));
-}
-
-/* Emits the `vavg.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_W, vd, vj, vk));
-}
-
-/* Emits the `vavg.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_D, vd, vj, vk));
-}
-
-/* Emits the `vavg.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_BU, vd, vj, vk));
-}
-
-/* Emits the `vavg.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_HU, vd, vj, vk));
-}
-
-/* Emits the `vavg.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_WU, vd, vj, vk));
-}
-
-/* Emits the `vavg.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavg_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_DU, vd, vj, vk));
-}
-
-/* Emits the `vavgr.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_B, vd, vj, vk));
-}
-
-/* Emits the `vavgr.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_H, vd, vj, vk));
-}
-
-/* Emits the `vavgr.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_W, vd, vj, vk));
-}
-
-/* Emits the `vavgr.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_D, vd, vj, vk));
-}
-
-/* Emits the `vavgr.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_BU, vd, vj, vk));
-}
-
-/* Emits the `vavgr.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_HU, vd, vj, vk));
-}
-
-/* Emits the `vavgr.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_WU, vd, vj, vk));
-}
-
-/* Emits the `vavgr.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vavgr_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_DU, vd, vj, vk));
-}
-
/* Emits the `vmax.b vd, vj, vk` instruction. */
static void __attribute__((unused))
tcg_out_opc_vmax_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_D, vd, vj, vk));
}

-/* Emits the `vmuh.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_B, vd, vj, vk));
-}
-
-/* Emits the `vmuh.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_H, vd, vj, vk));
-}
-
-/* Emits the `vmuh.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_W, vd, vj, vk));
-}
-
-/* Emits the `vmuh.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_D, vd, vj, vk));
-}
-
-/* Emits the `vmuh.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_BU, vd, vj, vk));
-}
-
-/* Emits the `vmuh.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_HU, vd, vj, vk));
-}
-
-/* Emits the `vmuh.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_WU, vd, vj, vk));
-}
-
-/* Emits the `vmuh.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmuh_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_DU, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_B, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_H, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_W, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_B, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_H, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_W, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vmulwev.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU_D, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vmulwod.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmulwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU_D, vd, vj, vk));
-}
-
-/* Emits the `vmadd.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_B, vd, vj, vk));
-}
-
-/* Emits the `vmadd.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_H, vd, vj, vk));
-}
-
-/* Emits the `vmadd.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_W, vd, vj, vk));
-}
-
-/* Emits the `vmadd.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_D, vd, vj, vk));
-}
-
-/* Emits the `vmsub.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_B, vd, vj, vk));
-}
-
-/* Emits the `vmsub.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_H, vd, vj, vk));
-}
-
-/* Emits the `vmsub.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_W, vd, vj, vk));
-}
-
-/* Emits the `vmsub.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_D, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_B, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_H, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_W, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.h.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_B, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.w.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_H, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.d.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_W, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.q.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_D, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.h.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.w.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.d.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.q.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vmaddwev.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU_D, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.h.bu.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU_B, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.w.hu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU_H, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.d.wu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU_W, vd, vj, vk));
-}
-
-/* Emits the `vmaddwod.q.du.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU_D, vd, vj, vk));
-}
-
-/* Emits the `vdiv.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_B, vd, vj, vk));
-}
-
-/* Emits the `vdiv.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_H, vd, vj, vk));
-}
-
-/* Emits the `vdiv.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_W, vd, vj, vk));
-}
-
-/* Emits the `vdiv.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_D, vd, vj, vk));
-}
-
-/* Emits the `vmod.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_B, vd, vj, vk));
-}
-
-/* Emits the `vmod.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_H, vd, vj, vk));
-}
-
-/* Emits the `vmod.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_W, vd, vj, vk));
-}
-
-/* Emits the `vmod.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_D, vd, vj, vk));
-}
-
-/* Emits the `vdiv.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_BU, vd, vj, vk));
-}
-
-/* Emits the `vdiv.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_HU, vd, vj, vk));
-}
-
-/* Emits the `vdiv.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_WU, vd, vj, vk));
-}
-
-/* Emits the `vdiv.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vdiv_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_DU, vd, vj, vk));
-}
-
-/* Emits the `vmod.bu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_BU, vd, vj, vk));
-}
-
-/* Emits the `vmod.hu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_HU, vd, vj, vk));
-}
-
-/* Emits the `vmod.wu vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_WU, vd, vj, vk));
-}
-
-/* Emits the `vmod.du vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vmod_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_DU, vd, vj, vk));
-}
-
/* Emits the `vsll.b vd, vj, vk` instruction. */
static void __attribute__((unused))
tcg_out_opc_vsll_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vrotr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_D, vd, vj, vk));
}

-/* Emits the `vsrlr.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_B, vd, vj, vk));
-}
-
-/* Emits the `vsrlr.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_H, vd, vj, vk));
-}
-
-/* Emits the `vsrlr.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_W, vd, vj, vk));
-}
-
-/* Emits the `vsrlr.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_D, vd, vj, vk));
-}
-
-/* Emits the `vsrar.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrar_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_B, vd, vj, vk));
-}
-
-/* Emits the `vsrar.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrar_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_H, vd, vj, vk));
-}
-
-/* Emits the `vsrar.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrar_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_W, vd, vj, vk));
-}
-
-/* Emits the `vsrar.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrar_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_D, vd, vj, vk));
-}
-
-/* Emits the `vsrln.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vsrln.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vsrln.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vsran.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vsran.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vsran.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vsrlrn.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vsrlrn.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vsrlrn.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vsrarn.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vsrarn.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vsrarn.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vssrln.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vssrln.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vssrln.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vssran.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vssran.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vssran.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.b.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_B_H, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.h.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_H_W, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.w.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_W_D, vd, vj, vk));
-}
-
-/* Emits the `vssrln.bu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_BU_H, vd, vj, vk));
-}
-
-/* Emits the `vssrln.hu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_HU_W, vd, vj, vk));
-}
-
-/* Emits the `vssrln.wu.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrln_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_WU_D, vd, vj, vk));
-}
-
-/* Emits the `vssran.bu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_BU_H, vd, vj, vk));
-}
-
-/* Emits the `vssran.hu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_HU_W, vd, vj, vk));
-}
-
-/* Emits the `vssran.wu.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssran_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_WU_D, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.bu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_BU_H, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.hu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_HU_W, vd, vj, vk));
-}
-
-/* Emits the `vssrlrn.wu.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_WU_D, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.bu.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_BU_H, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.hu.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_HU_W, vd, vj, vk));
-}
-
-/* Emits the `vssrarn.wu.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_WU_D, vd, vj, vk));
-}
-
-/* Emits the `vbitclr.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitclr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_B, vd, vj, vk));
-}
-
-/* Emits the `vbitclr.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitclr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_H, vd, vj, vk));
-}
-
-/* Emits the `vbitclr.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitclr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_W, vd, vj, vk));
-}
-
-/* Emits the `vbitclr.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitclr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_D, vd, vj, vk));
-}
-
-/* Emits the `vbitset.b vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitset_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_B, vd, vj, vk));
-}
-
-/* Emits the `vbitset.h vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitset_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_H, vd, vj, vk));
-}
-
-/* Emits the `vbitset.w vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitset_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
-{
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_W, vd, vj, vk));
-}
-
-/* Emits the `vbitset.d vd, vj, vk` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vbitset_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3504
-{
3505
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_D, vd, vj, vk));
3506
-}
3507
-
3508
-/* Emits the `vbitrev.b vd, vj, vk` instruction. */
3509
-static void __attribute__((unused))
3510
-tcg_out_opc_vbitrev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3511
-{
3512
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_B, vd, vj, vk));
3513
-}
3514
-
3515
-/* Emits the `vbitrev.h vd, vj, vk` instruction. */
3516
-static void __attribute__((unused))
3517
-tcg_out_opc_vbitrev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3518
-{
3519
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_H, vd, vj, vk));
3520
-}
3521
-
3522
-/* Emits the `vbitrev.w vd, vj, vk` instruction. */
3523
-static void __attribute__((unused))
3524
-tcg_out_opc_vbitrev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3525
-{
3526
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_W, vd, vj, vk));
3527
-}
3528
-
3529
-/* Emits the `vbitrev.d vd, vj, vk` instruction. */
3530
-static void __attribute__((unused))
3531
-tcg_out_opc_vbitrev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3532
-{
3533
- tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_D, vd, vj, vk));
3534
-}
3535
-
3536
-/* Emits the `vpackev.b vd, vj, vk` instruction. */
3537
-static void __attribute__((unused))
3538
-tcg_out_opc_vpackev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3539
-{
3540
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_B, vd, vj, vk));
3541
-}
3542
-
3543
-/* Emits the `vpackev.h vd, vj, vk` instruction. */
3544
-static void __attribute__((unused))
3545
-tcg_out_opc_vpackev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3546
-{
3547
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_H, vd, vj, vk));
3548
-}
3549
-
3550
-/* Emits the `vpackev.w vd, vj, vk` instruction. */
3551
-static void __attribute__((unused))
3552
-tcg_out_opc_vpackev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3553
-{
3554
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_W, vd, vj, vk));
3555
-}
3556
-
3557
-/* Emits the `vpackev.d vd, vj, vk` instruction. */
3558
-static void __attribute__((unused))
3559
-tcg_out_opc_vpackev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3560
-{
3561
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_D, vd, vj, vk));
3562
-}
3563
-
3564
-/* Emits the `vpackod.b vd, vj, vk` instruction. */
3565
-static void __attribute__((unused))
3566
-tcg_out_opc_vpackod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3567
-{
3568
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_B, vd, vj, vk));
3569
-}
3570
-
3571
-/* Emits the `vpackod.h vd, vj, vk` instruction. */
3572
-static void __attribute__((unused))
3573
-tcg_out_opc_vpackod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3574
-{
3575
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_H, vd, vj, vk));
3576
-}
3577
-
3578
-/* Emits the `vpackod.w vd, vj, vk` instruction. */
3579
-static void __attribute__((unused))
3580
-tcg_out_opc_vpackod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3581
-{
3582
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_W, vd, vj, vk));
3583
-}
3584
-
3585
-/* Emits the `vpackod.d vd, vj, vk` instruction. */
3586
-static void __attribute__((unused))
3587
-tcg_out_opc_vpackod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3588
-{
3589
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_D, vd, vj, vk));
3590
-}
3591
-
3592
-/* Emits the `vilvl.b vd, vj, vk` instruction. */
3593
-static void __attribute__((unused))
3594
-tcg_out_opc_vilvl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3595
-{
3596
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_B, vd, vj, vk));
3597
-}
3598
-
3599
-/* Emits the `vilvl.h vd, vj, vk` instruction. */
3600
-static void __attribute__((unused))
3601
-tcg_out_opc_vilvl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3602
-{
3603
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_H, vd, vj, vk));
3604
-}
3605
-
3606
-/* Emits the `vilvl.w vd, vj, vk` instruction. */
3607
-static void __attribute__((unused))
3608
-tcg_out_opc_vilvl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3609
-{
3610
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_W, vd, vj, vk));
3611
-}
3612
-
3613
-/* Emits the `vilvl.d vd, vj, vk` instruction. */
3614
-static void __attribute__((unused))
3615
-tcg_out_opc_vilvl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3616
-{
3617
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_D, vd, vj, vk));
3618
-}
3619
-
3620
-/* Emits the `vilvh.b vd, vj, vk` instruction. */
3621
-static void __attribute__((unused))
3622
-tcg_out_opc_vilvh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3623
-{
3624
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_B, vd, vj, vk));
3625
-}
3626
-
3627
-/* Emits the `vilvh.h vd, vj, vk` instruction. */
3628
-static void __attribute__((unused))
3629
-tcg_out_opc_vilvh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3630
-{
3631
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_H, vd, vj, vk));
3632
-}
3633
-
3634
-/* Emits the `vilvh.w vd, vj, vk` instruction. */
3635
-static void __attribute__((unused))
3636
-tcg_out_opc_vilvh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3637
-{
3638
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_W, vd, vj, vk));
3639
-}
3640
-
3641
-/* Emits the `vilvh.d vd, vj, vk` instruction. */
3642
-static void __attribute__((unused))
3643
-tcg_out_opc_vilvh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3644
-{
3645
- tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_D, vd, vj, vk));
3646
-}
3647
-
3648
-/* Emits the `vpickev.b vd, vj, vk` instruction. */
3649
-static void __attribute__((unused))
3650
-tcg_out_opc_vpickev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3651
-{
3652
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_B, vd, vj, vk));
3653
-}
3654
-
3655
-/* Emits the `vpickev.h vd, vj, vk` instruction. */
3656
-static void __attribute__((unused))
3657
-tcg_out_opc_vpickev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3658
-{
3659
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_H, vd, vj, vk));
3660
-}
3661
-
3662
-/* Emits the `vpickev.w vd, vj, vk` instruction. */
3663
-static void __attribute__((unused))
3664
-tcg_out_opc_vpickev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3665
-{
3666
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_W, vd, vj, vk));
3667
-}
3668
-
3669
-/* Emits the `vpickev.d vd, vj, vk` instruction. */
3670
-static void __attribute__((unused))
3671
-tcg_out_opc_vpickev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3672
-{
3673
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_D, vd, vj, vk));
3674
-}
3675
-
3676
-/* Emits the `vpickod.b vd, vj, vk` instruction. */
3677
-static void __attribute__((unused))
3678
-tcg_out_opc_vpickod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3679
-{
3680
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_B, vd, vj, vk));
3681
-}
3682
-
3683
-/* Emits the `vpickod.h vd, vj, vk` instruction. */
3684
-static void __attribute__((unused))
3685
-tcg_out_opc_vpickod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3686
-{
3687
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_H, vd, vj, vk));
3688
-}
3689
-
3690
-/* Emits the `vpickod.w vd, vj, vk` instruction. */
3691
-static void __attribute__((unused))
3692
-tcg_out_opc_vpickod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3693
-{
3694
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_W, vd, vj, vk));
3695
-}
3696
-
3697
-/* Emits the `vpickod.d vd, vj, vk` instruction. */
3698
-static void __attribute__((unused))
3699
-tcg_out_opc_vpickod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3700
-{
3701
- tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_D, vd, vj, vk));
3702
-}
3703
-
3704
/* Emits the `vreplve.b vd, vj, k` instruction. */
3705
static void __attribute__((unused))
3706
tcg_out_opc_vreplve_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3707
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vorn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3708
tcg_out32(s, encode_vdvjvk_insn(OPC_VORN_V, vd, vj, vk));
3709
}
3710
3711
-/* Emits the `vfrstp.b vd, vj, vk` instruction. */
3712
-static void __attribute__((unused))
3713
-tcg_out_opc_vfrstp_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3714
-{
3715
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_B, vd, vj, vk));
3716
-}
3717
-
3718
-/* Emits the `vfrstp.h vd, vj, vk` instruction. */
3719
-static void __attribute__((unused))
3720
-tcg_out_opc_vfrstp_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3721
-{
3722
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_H, vd, vj, vk));
3723
-}
3724
-
3725
-/* Emits the `vadd.q vd, vj, vk` instruction. */
3726
-static void __attribute__((unused))
3727
-tcg_out_opc_vadd_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3728
-{
3729
- tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_Q, vd, vj, vk));
3730
-}
3731
-
3732
-/* Emits the `vsub.q vd, vj, vk` instruction. */
3733
-static void __attribute__((unused))
3734
-tcg_out_opc_vsub_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3735
-{
3736
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_Q, vd, vj, vk));
3737
-}
3738
-
3739
-/* Emits the `vsigncov.b vd, vj, vk` instruction. */
3740
-static void __attribute__((unused))
3741
-tcg_out_opc_vsigncov_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3742
-{
3743
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_B, vd, vj, vk));
3744
-}
3745
-
3746
-/* Emits the `vsigncov.h vd, vj, vk` instruction. */
3747
-static void __attribute__((unused))
3748
-tcg_out_opc_vsigncov_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3749
-{
3750
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_H, vd, vj, vk));
3751
-}
3752
-
3753
-/* Emits the `vsigncov.w vd, vj, vk` instruction. */
3754
-static void __attribute__((unused))
3755
-tcg_out_opc_vsigncov_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3756
-{
3757
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_W, vd, vj, vk));
3758
-}
3759
-
3760
-/* Emits the `vsigncov.d vd, vj, vk` instruction. */
3761
-static void __attribute__((unused))
3762
-tcg_out_opc_vsigncov_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3763
-{
3764
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_D, vd, vj, vk));
3765
-}
3766
-
3767
-/* Emits the `vfadd.s vd, vj, vk` instruction. */
3768
-static void __attribute__((unused))
3769
-tcg_out_opc_vfadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3770
-{
3771
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_S, vd, vj, vk));
3772
-}
3773
-
3774
-/* Emits the `vfadd.d vd, vj, vk` instruction. */
3775
-static void __attribute__((unused))
3776
-tcg_out_opc_vfadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3777
-{
3778
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_D, vd, vj, vk));
3779
-}
3780
-
3781
-/* Emits the `vfsub.s vd, vj, vk` instruction. */
3782
-static void __attribute__((unused))
3783
-tcg_out_opc_vfsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3784
-{
3785
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_S, vd, vj, vk));
3786
-}
3787
-
3788
-/* Emits the `vfsub.d vd, vj, vk` instruction. */
3789
-static void __attribute__((unused))
3790
-tcg_out_opc_vfsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3791
-{
3792
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_D, vd, vj, vk));
3793
-}
3794
-
3795
-/* Emits the `vfmul.s vd, vj, vk` instruction. */
3796
-static void __attribute__((unused))
3797
-tcg_out_opc_vfmul_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3798
-{
3799
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_S, vd, vj, vk));
3800
-}
3801
-
3802
-/* Emits the `vfmul.d vd, vj, vk` instruction. */
3803
-static void __attribute__((unused))
3804
-tcg_out_opc_vfmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3805
-{
3806
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_D, vd, vj, vk));
3807
-}
3808
-
3809
-/* Emits the `vfdiv.s vd, vj, vk` instruction. */
3810
-static void __attribute__((unused))
3811
-tcg_out_opc_vfdiv_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3812
-{
3813
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_S, vd, vj, vk));
3814
-}
3815
-
3816
-/* Emits the `vfdiv.d vd, vj, vk` instruction. */
3817
-static void __attribute__((unused))
3818
-tcg_out_opc_vfdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3819
-{
3820
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_D, vd, vj, vk));
3821
-}
3822
-
3823
-/* Emits the `vfmax.s vd, vj, vk` instruction. */
3824
-static void __attribute__((unused))
3825
-tcg_out_opc_vfmax_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3826
-{
3827
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_S, vd, vj, vk));
3828
-}
3829
-
3830
-/* Emits the `vfmax.d vd, vj, vk` instruction. */
3831
-static void __attribute__((unused))
3832
-tcg_out_opc_vfmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3833
-{
3834
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_D, vd, vj, vk));
3835
-}
3836
-
3837
-/* Emits the `vfmin.s vd, vj, vk` instruction. */
3838
-static void __attribute__((unused))
3839
-tcg_out_opc_vfmin_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3840
-{
3841
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_S, vd, vj, vk));
3842
-}
3843
-
3844
-/* Emits the `vfmin.d vd, vj, vk` instruction. */
3845
-static void __attribute__((unused))
3846
-tcg_out_opc_vfmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3847
-{
3848
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_D, vd, vj, vk));
3849
-}
3850
-
3851
-/* Emits the `vfmaxa.s vd, vj, vk` instruction. */
3852
-static void __attribute__((unused))
3853
-tcg_out_opc_vfmaxa_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3854
-{
3855
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_S, vd, vj, vk));
3856
-}
3857
-
3858
-/* Emits the `vfmaxa.d vd, vj, vk` instruction. */
3859
-static void __attribute__((unused))
3860
-tcg_out_opc_vfmaxa_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3861
-{
3862
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_D, vd, vj, vk));
3863
-}
3864
-
3865
-/* Emits the `vfmina.s vd, vj, vk` instruction. */
3866
-static void __attribute__((unused))
3867
-tcg_out_opc_vfmina_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3868
-{
3869
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_S, vd, vj, vk));
3870
-}
3871
-
3872
-/* Emits the `vfmina.d vd, vj, vk` instruction. */
3873
-static void __attribute__((unused))
3874
-tcg_out_opc_vfmina_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3875
-{
3876
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_D, vd, vj, vk));
3877
-}
3878
-
3879
-/* Emits the `vfcvt.h.s vd, vj, vk` instruction. */
3880
-static void __attribute__((unused))
3881
-tcg_out_opc_vfcvt_h_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3882
-{
3883
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_H_S, vd, vj, vk));
3884
-}
3885
-
3886
-/* Emits the `vfcvt.s.d vd, vj, vk` instruction. */
3887
-static void __attribute__((unused))
3888
-tcg_out_opc_vfcvt_s_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3889
-{
3890
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_S_D, vd, vj, vk));
3891
-}
3892
-
3893
-/* Emits the `vffint.s.l vd, vj, vk` instruction. */
3894
-static void __attribute__((unused))
3895
-tcg_out_opc_vffint_s_l(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3896
-{
3897
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFFINT_S_L, vd, vj, vk));
3898
-}
3899
-
3900
-/* Emits the `vftint.w.d vd, vj, vk` instruction. */
3901
-static void __attribute__((unused))
3902
-tcg_out_opc_vftint_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3903
-{
3904
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINT_W_D, vd, vj, vk));
3905
-}
3906
-
3907
-/* Emits the `vftintrm.w.d vd, vj, vk` instruction. */
3908
-static void __attribute__((unused))
3909
-tcg_out_opc_vftintrm_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3910
-{
3911
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRM_W_D, vd, vj, vk));
3912
-}
3913
-
3914
-/* Emits the `vftintrp.w.d vd, vj, vk` instruction. */
3915
-static void __attribute__((unused))
3916
-tcg_out_opc_vftintrp_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3917
-{
3918
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRP_W_D, vd, vj, vk));
3919
-}
3920
-
3921
-/* Emits the `vftintrz.w.d vd, vj, vk` instruction. */
3922
-static void __attribute__((unused))
3923
-tcg_out_opc_vftintrz_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3924
-{
3925
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRZ_W_D, vd, vj, vk));
3926
-}
3927
-
3928
-/* Emits the `vftintrne.w.d vd, vj, vk` instruction. */
3929
-static void __attribute__((unused))
3930
-tcg_out_opc_vftintrne_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3931
-{
3932
- tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRNE_W_D, vd, vj, vk));
3933
-}
3934
-
3935
-/* Emits the `vshuf.h vd, vj, vk` instruction. */
3936
-static void __attribute__((unused))
3937
-tcg_out_opc_vshuf_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3938
-{
3939
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_H, vd, vj, vk));
3940
-}
3941
-
3942
-/* Emits the `vshuf.w vd, vj, vk` instruction. */
3943
-static void __attribute__((unused))
3944
-tcg_out_opc_vshuf_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3945
-{
3946
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_W, vd, vj, vk));
3947
-}
3948
-
3949
-/* Emits the `vshuf.d vd, vj, vk` instruction. */
3950
-static void __attribute__((unused))
3951
-tcg_out_opc_vshuf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3952
-{
3953
- tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_D, vd, vj, vk));
3954
-}
3955
-
3956
/* Emits the `vseqi.b vd, vj, sk5` instruction. */
3957
static void __attribute__((unused))
3958
tcg_out_opc_vseqi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
3959
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vsubi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3960
tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_DU, vd, vj, uk5));
3961
}
3962
3963
-/* Emits the `vbsll.v vd, vj, uk5` instruction. */
3964
-static void __attribute__((unused))
3965
-tcg_out_opc_vbsll_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3966
-{
3967
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSLL_V, vd, vj, uk5));
3968
-}
3969
-
3970
-/* Emits the `vbsrl.v vd, vj, uk5` instruction. */
3971
-static void __attribute__((unused))
3972
-tcg_out_opc_vbsrl_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3973
-{
3974
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSRL_V, vd, vj, uk5));
3975
-}
3976
-
3977
/* Emits the `vmaxi.b vd, vj, sk5` instruction. */
3978
static void __attribute__((unused))
3979
tcg_out_opc_vmaxi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
3980
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vmini_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3981
tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_DU, vd, vj, uk5));
3982
}
3983
3984
-/* Emits the `vfrstpi.b vd, vj, uk5` instruction. */
3985
-static void __attribute__((unused))
3986
-tcg_out_opc_vfrstpi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3987
-{
3988
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_B, vd, vj, uk5));
3989
-}
3990
-
3991
-/* Emits the `vfrstpi.h vd, vj, uk5` instruction. */
3992
-static void __attribute__((unused))
3993
-tcg_out_opc_vfrstpi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
3994
-{
3995
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_H, vd, vj, uk5));
3996
-}
3997
-
3998
-/* Emits the `vclo.b vd, vj` instruction. */
3999
-static void __attribute__((unused))
4000
-tcg_out_opc_vclo_b(TCGContext *s, TCGReg vd, TCGReg vj)
4001
-{
4002
- tcg_out32(s, encode_vdvj_insn(OPC_VCLO_B, vd, vj));
4003
-}
4004
-
4005
-/* Emits the `vclo.h vd, vj` instruction. */
4006
-static void __attribute__((unused))
4007
-tcg_out_opc_vclo_h(TCGContext *s, TCGReg vd, TCGReg vj)
4008
-{
4009
- tcg_out32(s, encode_vdvj_insn(OPC_VCLO_H, vd, vj));
4010
-}
4011
-
4012
-/* Emits the `vclo.w vd, vj` instruction. */
4013
-static void __attribute__((unused))
4014
-tcg_out_opc_vclo_w(TCGContext *s, TCGReg vd, TCGReg vj)
4015
-{
4016
- tcg_out32(s, encode_vdvj_insn(OPC_VCLO_W, vd, vj));
4017
-}
4018
-
4019
-/* Emits the `vclo.d vd, vj` instruction. */
4020
-static void __attribute__((unused))
4021
-tcg_out_opc_vclo_d(TCGContext *s, TCGReg vd, TCGReg vj)
4022
-{
4023
- tcg_out32(s, encode_vdvj_insn(OPC_VCLO_D, vd, vj));
4024
-}
4025
-
4026
-/* Emits the `vclz.b vd, vj` instruction. */
4027
-static void __attribute__((unused))
4028
-tcg_out_opc_vclz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4029
-{
4030
- tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_B, vd, vj));
4031
-}
4032
-
4033
-/* Emits the `vclz.h vd, vj` instruction. */
4034
-static void __attribute__((unused))
4035
-tcg_out_opc_vclz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4036
-{
4037
- tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_H, vd, vj));
4038
-}
4039
-
4040
-/* Emits the `vclz.w vd, vj` instruction. */
4041
-static void __attribute__((unused))
4042
-tcg_out_opc_vclz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4043
-{
4044
- tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_W, vd, vj));
4045
-}
4046
-
4047
-/* Emits the `vclz.d vd, vj` instruction. */
4048
-static void __attribute__((unused))
4049
-tcg_out_opc_vclz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4050
-{
4051
- tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_D, vd, vj));
4052
-}
4053
-
4054
-/* Emits the `vpcnt.b vd, vj` instruction. */
4055
-static void __attribute__((unused))
4056
-tcg_out_opc_vpcnt_b(TCGContext *s, TCGReg vd, TCGReg vj)
4057
-{
4058
- tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_B, vd, vj));
4059
-}
4060
-
4061
-/* Emits the `vpcnt.h vd, vj` instruction. */
4062
-static void __attribute__((unused))
4063
-tcg_out_opc_vpcnt_h(TCGContext *s, TCGReg vd, TCGReg vj)
4064
-{
4065
- tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_H, vd, vj));
4066
-}
4067
-
4068
-/* Emits the `vpcnt.w vd, vj` instruction. */
4069
-static void __attribute__((unused))
4070
-tcg_out_opc_vpcnt_w(TCGContext *s, TCGReg vd, TCGReg vj)
4071
-{
4072
- tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_W, vd, vj));
4073
-}
4074
-
4075
-/* Emits the `vpcnt.d vd, vj` instruction. */
4076
-static void __attribute__((unused))
4077
-tcg_out_opc_vpcnt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4078
-{
4079
- tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_D, vd, vj));
4080
-}
4081
-
4082
/* Emits the `vneg.b vd, vj` instruction. */
4083
static void __attribute__((unused))
4084
tcg_out_opc_vneg_b(TCGContext *s, TCGReg vd, TCGReg vj)
4085
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vneg_d(TCGContext *s, TCGReg vd, TCGReg vj)
4086
tcg_out32(s, encode_vdvj_insn(OPC_VNEG_D, vd, vj));
4087
}
4088
4089
-/* Emits the `vmskltz.b vd, vj` instruction. */
4090
-static void __attribute__((unused))
4091
-tcg_out_opc_vmskltz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4092
-{
4093
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_B, vd, vj));
4094
-}
4095
-
4096
-/* Emits the `vmskltz.h vd, vj` instruction. */
4097
-static void __attribute__((unused))
4098
-tcg_out_opc_vmskltz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4099
-{
4100
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_H, vd, vj));
4101
-}
4102
-
4103
-/* Emits the `vmskltz.w vd, vj` instruction. */
4104
-static void __attribute__((unused))
4105
-tcg_out_opc_vmskltz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4106
-{
4107
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_W, vd, vj));
4108
-}
4109
-
4110
-/* Emits the `vmskltz.d vd, vj` instruction. */
4111
-static void __attribute__((unused))
4112
-tcg_out_opc_vmskltz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4113
-{
4114
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_D, vd, vj));
4115
-}
4116
-
4117
-/* Emits the `vmskgez.b vd, vj` instruction. */
4118
-static void __attribute__((unused))
4119
-tcg_out_opc_vmskgez_b(TCGContext *s, TCGReg vd, TCGReg vj)
4120
-{
4121
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKGEZ_B, vd, vj));
4122
-}
4123
-
4124
-/* Emits the `vmsknz.b vd, vj` instruction. */
4125
-static void __attribute__((unused))
4126
-tcg_out_opc_vmsknz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4127
-{
4128
- tcg_out32(s, encode_vdvj_insn(OPC_VMSKNZ_B, vd, vj));
4129
-}
4130
-
4131
-/* Emits the `vseteqz.v cd, vj` instruction. */
4132
-static void __attribute__((unused))
4133
-tcg_out_opc_vseteqz_v(TCGContext *s, TCGReg cd, TCGReg vj)
4134
-{
4135
- tcg_out32(s, encode_cdvj_insn(OPC_VSETEQZ_V, cd, vj));
4136
-}
4137
-
4138
-/* Emits the `vsetnez.v cd, vj` instruction. */
4139
-static void __attribute__((unused))
4140
-tcg_out_opc_vsetnez_v(TCGContext *s, TCGReg cd, TCGReg vj)
4141
-{
4142
- tcg_out32(s, encode_cdvj_insn(OPC_VSETNEZ_V, cd, vj));
4143
-}
4144
-
4145
-/* Emits the `vsetanyeqz.b cd, vj` instruction. */
4146
-static void __attribute__((unused))
4147
-tcg_out_opc_vsetanyeqz_b(TCGContext *s, TCGReg cd, TCGReg vj)
4148
-{
4149
- tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_B, cd, vj));
4150
-}
4151
-
4152
-/* Emits the `vsetanyeqz.h cd, vj` instruction. */
4153
-static void __attribute__((unused))
4154
-tcg_out_opc_vsetanyeqz_h(TCGContext *s, TCGReg cd, TCGReg vj)
4155
-{
4156
- tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_H, cd, vj));
4157
-}
4158
-
4159
-/* Emits the `vsetanyeqz.w cd, vj` instruction. */
4160
-static void __attribute__((unused))
4161
-tcg_out_opc_vsetanyeqz_w(TCGContext *s, TCGReg cd, TCGReg vj)
4162
-{
4163
- tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_W, cd, vj));
4164
-}
4165
-
4166
-/* Emits the `vsetanyeqz.d cd, vj` instruction. */
4167
-static void __attribute__((unused))
4168
-tcg_out_opc_vsetanyeqz_d(TCGContext *s, TCGReg cd, TCGReg vj)
4169
-{
4170
- tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_D, cd, vj));
4171
-}
4172
-
4173
-/* Emits the `vsetallnez.b cd, vj` instruction. */
4174
-static void __attribute__((unused))
4175
-tcg_out_opc_vsetallnez_b(TCGContext *s, TCGReg cd, TCGReg vj)
4176
-{
4177
- tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_B, cd, vj));
4178
-}
4179
-
4180
-/* Emits the `vsetallnez.h cd, vj` instruction. */
4181
-static void __attribute__((unused))
4182
-tcg_out_opc_vsetallnez_h(TCGContext *s, TCGReg cd, TCGReg vj)
4183
-{
4184
- tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_H, cd, vj));
4185
-}
4186
-
4187
-/* Emits the `vsetallnez.w cd, vj` instruction. */
4188
-static void __attribute__((unused))
4189
-tcg_out_opc_vsetallnez_w(TCGContext *s, TCGReg cd, TCGReg vj)
4190
-{
4191
- tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_W, cd, vj));
4192
-}
4193
-
4194
-/* Emits the `vsetallnez.d cd, vj` instruction. */
4195
-static void __attribute__((unused))
4196
-tcg_out_opc_vsetallnez_d(TCGContext *s, TCGReg cd, TCGReg vj)
4197
-{
4198
- tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_D, cd, vj));
4199
-}
4200
-
4201
-/* Emits the `vflogb.s vd, vj` instruction. */
4202
-static void __attribute__((unused))
4203
-tcg_out_opc_vflogb_s(TCGContext *s, TCGReg vd, TCGReg vj)
4204
-{
4205
- tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_S, vd, vj));
4206
-}
4207
-
4208
-/* Emits the `vflogb.d vd, vj` instruction. */
4209
-static void __attribute__((unused))
4210
-tcg_out_opc_vflogb_d(TCGContext *s, TCGReg vd, TCGReg vj)
4211
-{
4212
- tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_D, vd, vj));
4213
-}
4214
-
4215
-/* Emits the `vfclass.s vd, vj` instruction. */
4216
-static void __attribute__((unused))
4217
-tcg_out_opc_vfclass_s(TCGContext *s, TCGReg vd, TCGReg vj)
4218
-{
4219
- tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_S, vd, vj));
4220
-}
4221
-
4222
-/* Emits the `vfclass.d vd, vj` instruction. */
4223
-static void __attribute__((unused))
4224
-tcg_out_opc_vfclass_d(TCGContext *s, TCGReg vd, TCGReg vj)
4225
-{
4226
- tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_D, vd, vj));
4227
-}
4228
-
4229
-/* Emits the `vfsqrt.s vd, vj` instruction. */
4230
-static void __attribute__((unused))
4231
-tcg_out_opc_vfsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4232
-{
4233
- tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_S, vd, vj));
4234
-}
4235
-
4236
-/* Emits the `vfsqrt.d vd, vj` instruction. */
4237
-static void __attribute__((unused))
4238
-tcg_out_opc_vfsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4239
-{
4240
- tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_D, vd, vj));
4241
-}
4242
-
4243
-/* Emits the `vfrecip.s vd, vj` instruction. */
4244
-static void __attribute__((unused))
4245
-tcg_out_opc_vfrecip_s(TCGContext *s, TCGReg vd, TCGReg vj)
4246
-{
4247
- tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_S, vd, vj));
4248
-}
4249
-
4250
-/* Emits the `vfrecip.d vd, vj` instruction. */
4251
-static void __attribute__((unused))
4252
-tcg_out_opc_vfrecip_d(TCGContext *s, TCGReg vd, TCGReg vj)
4253
-{
4254
- tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_D, vd, vj));
4255
-}
4256
-
4257
-/* Emits the `vfrsqrt.s vd, vj` instruction. */
4258
-static void __attribute__((unused))
4259
-tcg_out_opc_vfrsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4260
-{
4261
- tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_S, vd, vj));
4262
-}
4263
-
4264
-/* Emits the `vfrsqrt.d vd, vj` instruction. */
4265
-static void __attribute__((unused))
4266
-tcg_out_opc_vfrsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4267
-{
4268
- tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_D, vd, vj));
4269
-}
4270
-
4271
-/* Emits the `vfrint.s vd, vj` instruction. */
4272
-static void __attribute__((unused))
4273
-tcg_out_opc_vfrint_s(TCGContext *s, TCGReg vd, TCGReg vj)
4274
-{
4275
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_S, vd, vj));
4276
-}
4277
-
4278
-/* Emits the `vfrint.d vd, vj` instruction. */
4279
-static void __attribute__((unused))
4280
-tcg_out_opc_vfrint_d(TCGContext *s, TCGReg vd, TCGReg vj)
4281
-{
4282
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_D, vd, vj));
4283
-}
4284
-
4285
-/* Emits the `vfrintrm.s vd, vj` instruction. */
4286
-static void __attribute__((unused))
4287
-tcg_out_opc_vfrintrm_s(TCGContext *s, TCGReg vd, TCGReg vj)
4288
-{
4289
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_S, vd, vj));
4290
-}
4291
-
4292
-/* Emits the `vfrintrm.d vd, vj` instruction. */
4293
-static void __attribute__((unused))
4294
-tcg_out_opc_vfrintrm_d(TCGContext *s, TCGReg vd, TCGReg vj)
4295
-{
4296
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_D, vd, vj));
4297
-}
4298
-
4299
-/* Emits the `vfrintrp.s vd, vj` instruction. */
4300
-static void __attribute__((unused))
4301
-tcg_out_opc_vfrintrp_s(TCGContext *s, TCGReg vd, TCGReg vj)
4302
-{
4303
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_S, vd, vj));
4304
-}
4305
-
4306
-/* Emits the `vfrintrp.d vd, vj` instruction. */
4307
-static void __attribute__((unused))
4308
-tcg_out_opc_vfrintrp_d(TCGContext *s, TCGReg vd, TCGReg vj)
4309
-{
4310
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_D, vd, vj));
4311
-}
4312
-
4313
-/* Emits the `vfrintrz.s vd, vj` instruction. */
4314
-static void __attribute__((unused))
4315
-tcg_out_opc_vfrintrz_s(TCGContext *s, TCGReg vd, TCGReg vj)
4316
-{
4317
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_S, vd, vj));
4318
-}
4319
-
4320
-/* Emits the `vfrintrz.d vd, vj` instruction. */
4321
-static void __attribute__((unused))
4322
-tcg_out_opc_vfrintrz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4323
-{
4324
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_D, vd, vj));
4325
-}
4326
-
4327
-/* Emits the `vfrintrne.s vd, vj` instruction. */
4328
-static void __attribute__((unused))
4329
-tcg_out_opc_vfrintrne_s(TCGContext *s, TCGReg vd, TCGReg vj)
4330
-{
4331
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_S, vd, vj));
4332
-}
4333
-
4334
-/* Emits the `vfrintrne.d vd, vj` instruction. */
4335
-static void __attribute__((unused))
4336
-tcg_out_opc_vfrintrne_d(TCGContext *s, TCGReg vd, TCGReg vj)
4337
-{
4338
- tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_D, vd, vj));
4339
-}
4340
-
4341
-/* Emits the `vfcvtl.s.h vd, vj` instruction. */
4342
-static void __attribute__((unused))
4343
-tcg_out_opc_vfcvtl_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4344
-{
4345
- tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_S_H, vd, vj));
4346
-}
4347
-
4348
-/* Emits the `vfcvth.s.h vd, vj` instruction. */
4349
-static void __attribute__((unused))
4350
-tcg_out_opc_vfcvth_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4351
-{
4352
- tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_S_H, vd, vj));
4353
-}
4354
-
4355
-/* Emits the `vfcvtl.d.s vd, vj` instruction. */
4356
-static void __attribute__((unused))
4357
-tcg_out_opc_vfcvtl_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4358
-{
4359
- tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_D_S, vd, vj));
4360
-}
4361
-
4362
-/* Emits the `vfcvth.d.s vd, vj` instruction. */
4363
-static void __attribute__((unused))
4364
-tcg_out_opc_vfcvth_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4365
-{
4366
- tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_D_S, vd, vj));
4367
-}
4368
-
4369
-/* Emits the `vffint.s.w vd, vj` instruction. */
4370
-static void __attribute__((unused))
4371
-tcg_out_opc_vffint_s_w(TCGContext *s, TCGReg vd, TCGReg vj)
4372
-{
4373
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_W, vd, vj));
4374
-}
4375
-
4376
-/* Emits the `vffint.s.wu vd, vj` instruction. */
4377
-static void __attribute__((unused))
4378
-tcg_out_opc_vffint_s_wu(TCGContext *s, TCGReg vd, TCGReg vj)
4379
-{
4380
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_WU, vd, vj));
4381
-}
4382
-
4383
-/* Emits the `vffint.d.l vd, vj` instruction. */
4384
-static void __attribute__((unused))
4385
-tcg_out_opc_vffint_d_l(TCGContext *s, TCGReg vd, TCGReg vj)
4386
-{
4387
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_L, vd, vj));
4388
-}
4389
-
4390
-/* Emits the `vffint.d.lu vd, vj` instruction. */
4391
-static void __attribute__((unused))
4392
-tcg_out_opc_vffint_d_lu(TCGContext *s, TCGReg vd, TCGReg vj)
4393
-{
4394
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_LU, vd, vj));
4395
-}
4396
-
4397
-/* Emits the `vffintl.d.w vd, vj` instruction. */
4398
-static void __attribute__((unused))
4399
-tcg_out_opc_vffintl_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4400
-{
4401
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINTL_D_W, vd, vj));
4402
-}
4403
-
4404
-/* Emits the `vffinth.d.w vd, vj` instruction. */
4405
-static void __attribute__((unused))
4406
-tcg_out_opc_vffinth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4407
-{
4408
- tcg_out32(s, encode_vdvj_insn(OPC_VFFINTH_D_W, vd, vj));
4409
-}
4410
-
4411
-/* Emits the `vftint.w.s vd, vj` instruction. */
4412
-static void __attribute__((unused))
4413
-tcg_out_opc_vftint_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4414
-{
4415
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_W_S, vd, vj));
4416
-}
4417
-
4418
-/* Emits the `vftint.l.d vd, vj` instruction. */
4419
-static void __attribute__((unused))
4420
-tcg_out_opc_vftint_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4421
-{
4422
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_L_D, vd, vj));
4423
-}
4424
-
4425
-/* Emits the `vftintrm.w.s vd, vj` instruction. */
4426
-static void __attribute__((unused))
4427
-tcg_out_opc_vftintrm_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4428
-{
4429
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_W_S, vd, vj));
4430
-}
4431
-
4432
-/* Emits the `vftintrm.l.d vd, vj` instruction. */
4433
-static void __attribute__((unused))
4434
-tcg_out_opc_vftintrm_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4435
-{
4436
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_L_D, vd, vj));
4437
-}
4438
-
4439
-/* Emits the `vftintrp.w.s vd, vj` instruction. */
4440
-static void __attribute__((unused))
4441
-tcg_out_opc_vftintrp_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4442
-{
4443
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_W_S, vd, vj));
4444
-}
4445
-
4446
-/* Emits the `vftintrp.l.d vd, vj` instruction. */
4447
-static void __attribute__((unused))
4448
-tcg_out_opc_vftintrp_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4449
-{
4450
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_L_D, vd, vj));
4451
-}
4452
-
4453
-/* Emits the `vftintrz.w.s vd, vj` instruction. */
4454
-static void __attribute__((unused))
4455
-tcg_out_opc_vftintrz_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4456
-{
4457
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_W_S, vd, vj));
4458
-}
4459
-
4460
-/* Emits the `vftintrz.l.d vd, vj` instruction. */
4461
-static void __attribute__((unused))
4462
-tcg_out_opc_vftintrz_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4463
-{
4464
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_L_D, vd, vj));
4465
-}
4466
-
4467
-/* Emits the `vftintrne.w.s vd, vj` instruction. */
4468
-static void __attribute__((unused))
4469
-tcg_out_opc_vftintrne_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4470
-{
4471
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_W_S, vd, vj));
4472
-}
4473
-
4474
-/* Emits the `vftintrne.l.d vd, vj` instruction. */
4475
-static void __attribute__((unused))
4476
-tcg_out_opc_vftintrne_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4477
-{
4478
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_L_D, vd, vj));
4479
-}
4480
-
4481
-/* Emits the `vftint.wu.s vd, vj` instruction. */
4482
-static void __attribute__((unused))
4483
-tcg_out_opc_vftint_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
4484
-{
4485
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_WU_S, vd, vj));
4486
-}
4487
-
4488
-/* Emits the `vftint.lu.d vd, vj` instruction. */
4489
-static void __attribute__((unused))
4490
-tcg_out_opc_vftint_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
4491
-{
4492
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_LU_D, vd, vj));
4493
-}
4494
-
4495
-/* Emits the `vftintrz.wu.s vd, vj` instruction. */
4496
-static void __attribute__((unused))
4497
-tcg_out_opc_vftintrz_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
4498
-{
4499
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_WU_S, vd, vj));
4500
-}
4501
-
4502
-/* Emits the `vftintrz.lu.d vd, vj` instruction. */
4503
-static void __attribute__((unused))
4504
-tcg_out_opc_vftintrz_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
4505
-{
4506
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_LU_D, vd, vj));
4507
-}
4508
-
4509
-/* Emits the `vftintl.l.s vd, vj` instruction. */
4510
-static void __attribute__((unused))
4511
-tcg_out_opc_vftintl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4512
-{
4513
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTL_L_S, vd, vj));
4514
-}
4515
-
4516
-/* Emits the `vftinth.l.s vd, vj` instruction. */
4517
-static void __attribute__((unused))
4518
-tcg_out_opc_vftinth_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4519
-{
4520
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTH_L_S, vd, vj));
4521
-}
4522
-
4523
-/* Emits the `vftintrml.l.s vd, vj` instruction. */
4524
-static void __attribute__((unused))
4525
-tcg_out_opc_vftintrml_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4526
-{
4527
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRML_L_S, vd, vj));
4528
-}
4529
-
4530
-/* Emits the `vftintrmh.l.s vd, vj` instruction. */
4531
-static void __attribute__((unused))
4532
-tcg_out_opc_vftintrmh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4533
-{
4534
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRMH_L_S, vd, vj));
4535
-}
4536
-
4537
-/* Emits the `vftintrpl.l.s vd, vj` instruction. */
4538
-static void __attribute__((unused))
4539
-tcg_out_opc_vftintrpl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4540
-{
4541
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPL_L_S, vd, vj));
4542
-}
4543
-
4544
-/* Emits the `vftintrph.l.s vd, vj` instruction. */
4545
-static void __attribute__((unused))
4546
-tcg_out_opc_vftintrph_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4547
-{
4548
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPH_L_S, vd, vj));
4549
-}
4550
-
4551
-/* Emits the `vftintrzl.l.s vd, vj` instruction. */
4552
-static void __attribute__((unused))
4553
-tcg_out_opc_vftintrzl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4554
-{
4555
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZL_L_S, vd, vj));
4556
-}
4557
-
4558
-/* Emits the `vftintrzh.l.s vd, vj` instruction. */
4559
-static void __attribute__((unused))
4560
-tcg_out_opc_vftintrzh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4561
-{
4562
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZH_L_S, vd, vj));
4563
-}
4564
-
4565
-/* Emits the `vftintrnel.l.s vd, vj` instruction. */
4566
-static void __attribute__((unused))
4567
-tcg_out_opc_vftintrnel_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4568
-{
4569
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEL_L_S, vd, vj));
4570
-}
4571
-
4572
-/* Emits the `vftintrneh.l.s vd, vj` instruction. */
4573
-static void __attribute__((unused))
4574
-tcg_out_opc_vftintrneh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
4575
-{
4576
- tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEH_L_S, vd, vj));
4577
-}
4578
-
4579
-/* Emits the `vexth.h.b vd, vj` instruction. */
4580
-static void __attribute__((unused))
4581
-tcg_out_opc_vexth_h_b(TCGContext *s, TCGReg vd, TCGReg vj)
4582
-{
4583
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_H_B, vd, vj));
4584
-}
4585
-
4586
-/* Emits the `vexth.w.h vd, vj` instruction. */
4587
-static void __attribute__((unused))
4588
-tcg_out_opc_vexth_w_h(TCGContext *s, TCGReg vd, TCGReg vj)
4589
-{
4590
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_W_H, vd, vj));
4591
-}
4592
-
4593
-/* Emits the `vexth.d.w vd, vj` instruction. */
4594
-static void __attribute__((unused))
4595
-tcg_out_opc_vexth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4596
-{
4597
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_D_W, vd, vj));
4598
-}
4599
-
4600
-/* Emits the `vexth.q.d vd, vj` instruction. */
4601
-static void __attribute__((unused))
4602
-tcg_out_opc_vexth_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
4603
-{
4604
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_Q_D, vd, vj));
4605
-}
4606
-
4607
-/* Emits the `vexth.hu.bu vd, vj` instruction. */
4608
-static void __attribute__((unused))
4609
-tcg_out_opc_vexth_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj)
4610
-{
4611
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_HU_BU, vd, vj));
4612
-}
4613
-
4614
-/* Emits the `vexth.wu.hu vd, vj` instruction. */
4615
-static void __attribute__((unused))
4616
-tcg_out_opc_vexth_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj)
4617
-{
4618
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_WU_HU, vd, vj));
4619
-}
4620
-
4621
-/* Emits the `vexth.du.wu vd, vj` instruction. */
4622
-static void __attribute__((unused))
4623
-tcg_out_opc_vexth_du_wu(TCGContext *s, TCGReg vd, TCGReg vj)
4624
-{
4625
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_DU_WU, vd, vj));
4626
-}
4627
-
4628
-/* Emits the `vexth.qu.du vd, vj` instruction. */
4629
-static void __attribute__((unused))
4630
-tcg_out_opc_vexth_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
4631
-{
4632
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_QU_DU, vd, vj));
4633
-}
4634
-
4635
/* Emits the `vreplgr2vr.b vd, j` instruction. */
4636
static void __attribute__((unused))
4637
tcg_out_opc_vreplgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j)
4638
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vrotri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4639
tcg_out32(s, encode_vdvjuk6_insn(OPC_VROTRI_D, vd, vj, uk6));
4640
}
4641
4642
-/* Emits the `vsrlri.b vd, vj, uk3` instruction. */
4643
-static void __attribute__((unused))
4644
-tcg_out_opc_vsrlri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4645
-{
4646
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLRI_B, vd, vj, uk3));
4647
-}
4648
-
4649
-/* Emits the `vsrlri.h vd, vj, uk4` instruction. */
4650
-static void __attribute__((unused))
4651
-tcg_out_opc_vsrlri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4652
-{
4653
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRI_H, vd, vj, uk4));
4654
-}
4655
-
4656
-/* Emits the `vsrlri.w vd, vj, uk5` instruction. */
4657
-static void __attribute__((unused))
4658
-tcg_out_opc_vsrlri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4659
-{
4660
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRI_W, vd, vj, uk5));
4661
-}
4662
-
4663
-/* Emits the `vsrlri.d vd, vj, uk6` instruction. */
4664
-static void __attribute__((unused))
4665
-tcg_out_opc_vsrlri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4666
-{
4667
- tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRI_D, vd, vj, uk6));
4668
-}
4669
-
4670
-/* Emits the `vsrari.b vd, vj, uk3` instruction. */
4671
-static void __attribute__((unused))
4672
-tcg_out_opc_vsrari_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4673
-{
4674
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRARI_B, vd, vj, uk3));
4675
-}
4676
-
4677
-/* Emits the `vsrari.h vd, vj, uk4` instruction. */
4678
-static void __attribute__((unused))
4679
-tcg_out_opc_vsrari_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4680
-{
4681
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARI_H, vd, vj, uk4));
4682
-}
4683
-
4684
-/* Emits the `vsrari.w vd, vj, uk5` instruction. */
4685
-static void __attribute__((unused))
4686
-tcg_out_opc_vsrari_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4687
-{
4688
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARI_W, vd, vj, uk5));
4689
-}
4690
-
4691
-/* Emits the `vsrari.d vd, vj, uk6` instruction. */
4692
-static void __attribute__((unused))
4693
-tcg_out_opc_vsrari_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4694
-{
4695
- tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARI_D, vd, vj, uk6));
4696
-}
4697
-
4698
/* Emits the `vinsgr2vr.b vd, j, uk4` instruction. */
4699
static void __attribute__((unused))
4700
tcg_out_opc_vinsgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk4)
4701
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vreplvei_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk1)
4702
tcg_out32(s, encode_vdvjuk1_insn(OPC_VREPLVEI_D, vd, vj, uk1));
4703
}
4704
4705
-/* Emits the `vsllwil.h.b vd, vj, uk3` instruction. */
4706
-static void __attribute__((unused))
4707
-tcg_out_opc_vsllwil_h_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4708
-{
4709
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_H_B, vd, vj, uk3));
4710
-}
4711
-
4712
-/* Emits the `vsllwil.w.h vd, vj, uk4` instruction. */
4713
-static void __attribute__((unused))
4714
-tcg_out_opc_vsllwil_w_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4715
-{
4716
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_W_H, vd, vj, uk4));
4717
-}
4718
-
4719
-/* Emits the `vsllwil.d.w vd, vj, uk5` instruction. */
4720
-static void __attribute__((unused))
4721
-tcg_out_opc_vsllwil_d_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4722
-{
4723
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_D_W, vd, vj, uk5));
4724
-}
4725
-
4726
-/* Emits the `vextl.q.d vd, vj` instruction. */
4727
-static void __attribute__((unused))
4728
-tcg_out_opc_vextl_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
4729
-{
4730
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_Q_D, vd, vj));
4731
-}
4732
-
4733
-/* Emits the `vsllwil.hu.bu vd, vj, uk3` instruction. */
4734
-static void __attribute__((unused))
4735
-tcg_out_opc_vsllwil_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4736
-{
4737
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_HU_BU, vd, vj, uk3));
4738
-}
4739
-
4740
-/* Emits the `vsllwil.wu.hu vd, vj, uk4` instruction. */
4741
-static void __attribute__((unused))
4742
-tcg_out_opc_vsllwil_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4743
-{
4744
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_WU_HU, vd, vj, uk4));
4745
-}
4746
-
4747
-/* Emits the `vsllwil.du.wu vd, vj, uk5` instruction. */
4748
-static void __attribute__((unused))
4749
-tcg_out_opc_vsllwil_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4750
-{
4751
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_DU_WU, vd, vj, uk5));
4752
-}
4753
-
4754
-/* Emits the `vextl.qu.du vd, vj` instruction. */
4755
-static void __attribute__((unused))
4756
-tcg_out_opc_vextl_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
4757
-{
4758
- tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_QU_DU, vd, vj));
4759
-}
4760
-
4761
/* Emits the `vbitclri.b vd, vj, uk3` instruction. */
4762
static void __attribute__((unused))
4763
tcg_out_opc_vbitclri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4764
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vbitrevi_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4765
tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITREVI_D, vd, vj, uk6));
4766
}
4767
4768
-/* Emits the `vsat.b vd, vj, uk3` instruction. */
4769
-static void __attribute__((unused))
4770
-tcg_out_opc_vsat_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4771
-{
4772
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_B, vd, vj, uk3));
4773
-}
4774
-
4775
-/* Emits the `vsat.h vd, vj, uk4` instruction. */
4776
-static void __attribute__((unused))
4777
-tcg_out_opc_vsat_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4778
-{
4779
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_H, vd, vj, uk4));
4780
-}
4781
-
4782
-/* Emits the `vsat.w vd, vj, uk5` instruction. */
4783
-static void __attribute__((unused))
4784
-tcg_out_opc_vsat_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4785
-{
4786
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_W, vd, vj, uk5));
4787
-}
4788
-
4789
-/* Emits the `vsat.d vd, vj, uk6` instruction. */
4790
-static void __attribute__((unused))
4791
-tcg_out_opc_vsat_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4792
-{
4793
- tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_D, vd, vj, uk6));
4794
-}
4795
-
4796
-/* Emits the `vsat.bu vd, vj, uk3` instruction. */
4797
-static void __attribute__((unused))
4798
-tcg_out_opc_vsat_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4799
-{
4800
- tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_BU, vd, vj, uk3));
4801
-}
4802
-
4803
-/* Emits the `vsat.hu vd, vj, uk4` instruction. */
4804
-static void __attribute__((unused))
4805
-tcg_out_opc_vsat_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4806
-{
4807
- tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_HU, vd, vj, uk4));
4808
-}
4809
-
4810
-/* Emits the `vsat.wu vd, vj, uk5` instruction. */
4811
-static void __attribute__((unused))
4812
-tcg_out_opc_vsat_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4813
-{
4814
- tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_WU, vd, vj, uk5));
4815
-}
4816
-
4817
-/* Emits the `vsat.du vd, vj, uk6` instruction. */
4818
-static void __attribute__((unused))
4819
-tcg_out_opc_vsat_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4820
-{
4821
- tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_DU, vd, vj, uk6));
4822
-}
4823
-
4824
/* Emits the `vslli.b vd, vj, uk3` instruction. */
4825
static void __attribute__((unused))
4826
tcg_out_opc_vslli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
4827
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vsrai_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
4828
tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRAI_D, vd, vj, uk6));
4829
}
4830
4831
-/* Emits the `vsrlni.b.h vd, vj, uk4` instruction. */
4832
-static void __attribute__((unused))
4833
-tcg_out_opc_vsrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
4834
-{
4835
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vsrlni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vsrlni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vsrlni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vsrlrni.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vsrlrni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vsrlrni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vsrlrni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLRNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrlni.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrlni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrlni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrlni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrlni.bu.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_BU_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrlni.hu.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_HU_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrlni.wu.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_WU_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrlni.du.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_DU_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrlrni.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrlrni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrlrni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrlrni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrlrni.bu.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_BU_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrlrni.hu.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_HU_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrlrni.wu.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_WU_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrlrni.du.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrlrni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_DU_Q, vd, vj, uk7));
-}
-
-/* Emits the `vsrani.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRANI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vsrani.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRANI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vsrani.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRANI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vsrani.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRANI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vsrarni.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vsrarni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vsrarni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vsrarni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vsrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRARNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrani.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrani.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrani.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrani.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrani.bu.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_BU_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrani.hu.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_HU_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrani.wu.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_WU_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrani.du.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrani_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_DU_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrarni.b.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_B_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrarni.h.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_H_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrarni.w.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_W_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrarni.d.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_D_Q, vd, vj, uk7));
-}
-
-/* Emits the `vssrarni.bu.h vd, vj, uk4` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
-{
-    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_BU_H, vd, vj, uk4));
-}
-
-/* Emits the `vssrarni.hu.w vd, vj, uk5` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
-{
-    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_HU_W, vd, vj, uk5));
-}
-
-/* Emits the `vssrarni.wu.d vd, vj, uk6` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
-{
-    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_WU_D, vd, vj, uk6));
-}
-
-/* Emits the `vssrarni.du.q vd, vj, uk7` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vssrarni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
-{
-    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_DU_Q, vd, vj, uk7));
-}
-
-/* Emits the `vextrins.d vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vextrins_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_D, vd, vj, uk8));
-}
-
-/* Emits the `vextrins.w vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vextrins_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_W, vd, vj, uk8));
-}
-
-/* Emits the `vextrins.h vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vextrins_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_H, vd, vj, uk8));
-}
-
-/* Emits the `vextrins.b vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vextrins_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_B, vd, vj, uk8));
-}
-
-/* Emits the `vshuf4i.b vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vshuf4i_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_B, vd, vj, uk8));
-}
-
-/* Emits the `vshuf4i.h vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vshuf4i_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_H, vd, vj, uk8));
-}
-
-/* Emits the `vshuf4i.w vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vshuf4i_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_W, vd, vj, uk8));
-}
-
-/* Emits the `vshuf4i.d vd, vj, uk8` instruction. */
-static void __attribute__((unused))
-tcg_out_opc_vshuf4i_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
-{
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_D, vd, vj, uk8));
-}
-
 /* Emits the `vbitseli.b vd, vj, uk8` instruction. */
 static void __attribute__((unused))
 tcg_out_opc_vbitseli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_vldi(TCGContext *s, TCGReg vd, int32_t sj13)
     tcg_out32(s, encode_vdsj13_insn(OPC_VLDI, vd, sj13));
 }
 
-/* Emits the `vpermi.w vd, vj, uk8` instruction. */
+/* Emits the `xvseq.b xd, xj, xk` instruction. */
 static void __attribute__((unused))
-tcg_out_opc_vpermi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+tcg_out_opc_xvseq_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
 {
-    tcg_out32(s, encode_vdvjuk8_insn(OPC_VPERMI_W, vd, vj, uk8));
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSEQ_B, xd, xj, xk));
+}
+
+/* Emits the `xvseq.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseq_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSEQ_H, xd, xj, xk));
+}
+
+/* Emits the `xvseq.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseq_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSEQ_W, xd, xj, xk));
+}
+
+/* Emits the `xvseq.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseq_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSEQ_D, xd, xj, xk));
+}
+
+/* Emits the `xvsle.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_B, xd, xj, xk));
+}
+
+/* Emits the `xvsle.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_H, xd, xj, xk));
+}
+
+/* Emits the `xvsle.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_W, xd, xj, xk));
+}
+
+/* Emits the `xvsle.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_D, xd, xj, xk));
+}
+
+/* Emits the `xvsle.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_BU, xd, xj, xk));
+}
+
+/* Emits the `xvsle.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_HU, xd, xj, xk));
+}
+
+/* Emits the `xvsle.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_WU, xd, xj, xk));
+}
+
+/* Emits the `xvsle.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsle_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLE_DU, xd, xj, xk));
+}
+
+/* Emits the `xvslt.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_B, xd, xj, xk));
+}
+
+/* Emits the `xvslt.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_H, xd, xj, xk));
+}
+
+/* Emits the `xvslt.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_W, xd, xj, xk));
+}
+
+/* Emits the `xvslt.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_D, xd, xj, xk));
+}
+
+/* Emits the `xvslt.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_BU, xd, xj, xk));
+}
+
+/* Emits the `xvslt.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_HU, xd, xj, xk));
+}
+
+/* Emits the `xvslt.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_WU, xd, xj, xk));
+}
+
+/* Emits the `xvslt.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslt_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLT_DU, xd, xj, xk));
+}
+
+/* Emits the `xvadd.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvadd_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVADD_B, xd, xj, xk));
+}
+
+/* Emits the `xvadd.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvadd_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVADD_H, xd, xj, xk));
+}
+
+/* Emits the `xvadd.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvadd_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVADD_W, xd, xj, xk));
+}
+
+/* Emits the `xvadd.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvadd_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVADD_D, xd, xj, xk));
+}
+
+/* Emits the `xvsub.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsub_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSUB_B, xd, xj, xk));
+}
+
+/* Emits the `xvsub.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsub_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSUB_H, xd, xj, xk));
+}
+
+/* Emits the `xvsub.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsub_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSUB_W, xd, xj, xk));
+}
+
+/* Emits the `xvsub.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsub_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSUB_D, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_B, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_H, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_W, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_D, xd, xj, xk));
+}
+
+/* Emits the `xvssub.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_B, xd, xj, xk));
+}
+
+/* Emits the `xvssub.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_H, xd, xj, xk));
+}
+
+/* Emits the `xvssub.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_W, xd, xj, xk));
+}
+
+/* Emits the `xvssub.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_D, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_BU, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_HU, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_WU, xd, xj, xk));
+}
+
+/* Emits the `xvsadd.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsadd_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSADD_DU, xd, xj, xk));
+}
+
+/* Emits the `xvssub.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_BU, xd, xj, xk));
+}
+
+/* Emits the `xvssub.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_HU, xd, xj, xk));
+}
+
+/* Emits the `xvssub.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_WU, xd, xj, xk));
+}
+
+/* Emits the `xvssub.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvssub_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSSUB_DU, xd, xj, xk));
+}
+
+/* Emits the `xvmax.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_B, xd, xj, xk));
+}
+
+/* Emits the `xvmax.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_H, xd, xj, xk));
+}
+
+/* Emits the `xvmax.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_W, xd, xj, xk));
+}
+
+/* Emits the `xvmax.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_D, xd, xj, xk));
+}
+
+/* Emits the `xvmin.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_B, xd, xj, xk));
+}
+
+/* Emits the `xvmin.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_H, xd, xj, xk));
+}
+
+/* Emits the `xvmin.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_W, xd, xj, xk));
+}
+
+/* Emits the `xvmin.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_D, xd, xj, xk));
+}
+
+/* Emits the `xvmax.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_BU, xd, xj, xk));
+}
+
+/* Emits the `xvmax.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_HU, xd, xj, xk));
+}
+
+/* Emits the `xvmax.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_WU, xd, xj, xk));
+}
+
+/* Emits the `xvmax.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmax_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMAX_DU, xd, xj, xk));
+}
+
+/* Emits the `xvmin.bu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_bu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_BU, xd, xj, xk));
+}
+
+/* Emits the `xvmin.hu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_hu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_HU, xd, xj, xk));
+}
+
+/* Emits the `xvmin.wu xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_wu(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_WU, xd, xj, xk));
+}
+
+/* Emits the `xvmin.du xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmin_du(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMIN_DU, xd, xj, xk));
+}
+
+/* Emits the `xvmul.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmul_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMUL_B, xd, xj, xk));
+}
+
+/* Emits the `xvmul.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmul_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMUL_H, xd, xj, xk));
+}
+
+/* Emits the `xvmul.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmul_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMUL_W, xd, xj, xk));
+}
+
+/* Emits the `xvmul.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmul_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVMUL_D, xd, xj, xk));
+}
+
+/* Emits the `xvsll.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsll_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLL_B, xd, xj, xk));
+}
+
+/* Emits the `xvsll.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsll_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLL_H, xd, xj, xk));
+}
+
+/* Emits the `xvsll.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsll_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLL_W, xd, xj, xk));
+}
+
+/* Emits the `xvsll.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsll_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSLL_D, xd, xj, xk));
+}
+
+/* Emits the `xvsrl.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrl_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRL_B, xd, xj, xk));
+}
+
+/* Emits the `xvsrl.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrl_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRL_H, xd, xj, xk));
+}
+
+/* Emits the `xvsrl.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrl_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRL_W, xd, xj, xk));
+}
+
+/* Emits the `xvsrl.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrl_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRL_D, xd, xj, xk));
+}
+
+/* Emits the `xvsra.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsra_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRA_B, xd, xj, xk));
+}
+
+/* Emits the `xvsra.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsra_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRA_H, xd, xj, xk));
+}
+
+/* Emits the `xvsra.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsra_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRA_W, xd, xj, xk));
+}
+
+/* Emits the `xvsra.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsra_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVSRA_D, xd, xj, xk));
+}
+
+/* Emits the `xvrotr.b xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotr_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVROTR_B, xd, xj, xk));
+}
+
+/* Emits the `xvrotr.h xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotr_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVROTR_H, xd, xj, xk));
+}
+
+/* Emits the `xvrotr.w xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotr_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVROTR_W, xd, xj, xk));
+}
+
+/* Emits the `xvrotr.d xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotr_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVROTR_D, xd, xj, xk));
+}
+
+/* Emits the `xvreplve.b xd, xj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve_b(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg k)
+{
+    tcg_out32(s, encode_xdxjk_insn(OPC_XVREPLVE_B, xd, xj, k));
+}
+
+/* Emits the `xvreplve.h xd, xj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve_h(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg k)
+{
+    tcg_out32(s, encode_xdxjk_insn(OPC_XVREPLVE_H, xd, xj, k));
+}
+
+/* Emits the `xvreplve.w xd, xj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve_w(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg k)
+{
+    tcg_out32(s, encode_xdxjk_insn(OPC_XVREPLVE_W, xd, xj, k));
+}
+
+/* Emits the `xvreplve.d xd, xj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve_d(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg k)
+{
+    tcg_out32(s, encode_xdxjk_insn(OPC_XVREPLVE_D, xd, xj, k));
+}
+
+/* Emits the `xvand.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvand_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVAND_V, xd, xj, xk));
+}
+
+/* Emits the `xvor.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvor_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVOR_V, xd, xj, xk));
+}
+
+/* Emits the `xvxor.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvxor_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVXOR_V, xd, xj, xk));
+}
+
+/* Emits the `xvnor.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvnor_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVNOR_V, xd, xj, xk));
+}
+
+/* Emits the `xvandn.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvandn_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVANDN_V, xd, xj, xk));
+}
+
+/* Emits the `xvorn.v xd, xj, xk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvorn_v(TCGContext *s, TCGReg xd, TCGReg xj, TCGReg xk)
+{
+    tcg_out32(s, encode_xdxjxk_insn(OPC_XVORN_V, xd, xj, xk));
+}
+
+/* Emits the `xvseqi.b xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseqi_b(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSEQI_B, xd, xj, sk5));
+}
+
+/* Emits the `xvseqi.h xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseqi_h(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSEQI_H, xd, xj, sk5));
+}
+
+/* Emits the `xvseqi.w xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseqi_w(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSEQI_W, xd, xj, sk5));
+}
+
+/* Emits the `xvseqi.d xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvseqi_d(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSEQI_D, xd, xj, sk5));
+}
+
+/* Emits the `xvslei.b xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_b(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLEI_B, xd, xj, sk5));
+}
+
+/* Emits the `xvslei.h xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_h(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLEI_H, xd, xj, sk5));
+}
+
+/* Emits the `xvslei.w xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_w(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLEI_W, xd, xj, sk5));
+}
+
+/* Emits the `xvslei.d xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_d(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLEI_D, xd, xj, sk5));
+}
+
+/* Emits the `xvslei.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLEI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvslei.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLEI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvslei.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLEI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvslei.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslei_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLEI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvslti.b xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_b(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLTI_B, xd, xj, sk5));
+}
+
+/* Emits the `xvslti.h xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_h(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLTI_H, xd, xj, sk5));
+}
+
+/* Emits the `xvslti.w xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_w(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLTI_W, xd, xj, sk5));
+}
+
+/* Emits the `xvslti.d xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_d(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVSLTI_D, xd, xj, sk5));
+}
+
+/* Emits the `xvslti.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLTI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvslti.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLTI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvslti.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLTI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvslti.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslti_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLTI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvaddi.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvaddi_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVADDI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvaddi.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvaddi_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVADDI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvaddi.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvaddi_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVADDI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvaddi.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvaddi_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVADDI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvsubi.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsubi_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSUBI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvsubi.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsubi_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSUBI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvsubi.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsubi_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSUBI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvsubi.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsubi_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSUBI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvmaxi.b xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_b(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMAXI_B, xd, xj, sk5));
+}
+
+/* Emits the `xvmaxi.h xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_h(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMAXI_H, xd, xj, sk5));
+}
+
+/* Emits the `xvmaxi.w xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_w(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMAXI_W, xd, xj, sk5));
+}
+
+/* Emits the `xvmaxi.d xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_d(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMAXI_D, xd, xj, sk5));
+}
+
+/* Emits the `xvmini.b xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_b(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMINI_B, xd, xj, sk5));
+}
+
+/* Emits the `xvmini.h xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_h(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMINI_H, xd, xj, sk5));
+}
+
+/* Emits the `xvmini.w xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_w(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMINI_W, xd, xj, sk5));
+}
+
+/* Emits the `xvmini.d xd, xj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_d(TCGContext *s, TCGReg xd, TCGReg xj, int32_t sk5)
+{
+    tcg_out32(s, encode_xdxjsk5_insn(OPC_XVMINI_D, xd, xj, sk5));
+}
+
+/* Emits the `xvmaxi.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMAXI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvmaxi.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMAXI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvmaxi.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMAXI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvmaxi.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmaxi_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMAXI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvmini.bu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_bu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMINI_BU, xd, xj, uk5));
+}
+
+/* Emits the `xvmini.hu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_hu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMINI_HU, xd, xj, uk5));
+}
+
+/* Emits the `xvmini.wu xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_wu(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMINI_WU, xd, xj, uk5));
+}
+
+/* Emits the `xvmini.du xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvmini_du(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVMINI_DU, xd, xj, uk5));
+}
+
+/* Emits the `xvneg.b xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvneg_b(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVNEG_B, xd, xj));
+}
+
+/* Emits the `xvneg.h xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvneg_h(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVNEG_H, xd, xj));
+}
+
+/* Emits the `xvneg.w xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvneg_w(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVNEG_W, xd, xj));
+}
+
+/* Emits the `xvneg.d xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvneg_d(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVNEG_D, xd, xj));
+}
+
+/* Emits the `xvreplgr2vr.b xd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplgr2vr_b(TCGContext *s, TCGReg xd, TCGReg j)
+{
+    tcg_out32(s, encode_xdj_insn(OPC_XVREPLGR2VR_B, xd, j));
+}
+
+/* Emits the `xvreplgr2vr.h xd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplgr2vr_h(TCGContext *s, TCGReg xd, TCGReg j)
+{
+    tcg_out32(s, encode_xdj_insn(OPC_XVREPLGR2VR_H, xd, j));
+}
+
+/* Emits the `xvreplgr2vr.w xd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplgr2vr_w(TCGContext *s, TCGReg xd, TCGReg j)
+{
+    tcg_out32(s, encode_xdj_insn(OPC_XVREPLGR2VR_W, xd, j));
+}
+
+/* Emits the `xvreplgr2vr.d xd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplgr2vr_d(TCGContext *s, TCGReg xd, TCGReg j)
+{
+    tcg_out32(s, encode_xdj_insn(OPC_XVREPLGR2VR_D, xd, j));
+}
+
+/* Emits the `xvrotri.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotri_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVROTRI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvrotri.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotri_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVROTRI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvrotri.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotri_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVROTRI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvrotri.d xd, xj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrotri_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
+{
+    tcg_out32(s, encode_xdxjuk6_insn(OPC_XVROTRI_D, xd, xj, uk6));
+}
+
+/* Emits the `xvinsgr2vr.w xd, j, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvinsgr2vr_w(TCGContext *s, TCGReg xd, TCGReg j, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdjuk3_insn(OPC_XVINSGR2VR_W, xd, j, uk3));
+}
+
+/* Emits the `xvinsgr2vr.d xd, j, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvinsgr2vr_d(TCGContext *s, TCGReg xd, TCGReg j, uint32_t uk2)
+{
+    tcg_out32(s, encode_xdjuk2_insn(OPC_XVINSGR2VR_D, xd, j, uk2));
+}
+
+/* Emits the `xvpickve2gr.w d, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvpickve2gr_w(TCGContext *s, TCGReg d, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_dxjuk3_insn(OPC_XVPICKVE2GR_W, d, xj, uk3));
+}
+
+/* Emits the `xvpickve2gr.d d, xj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvpickve2gr_d(TCGContext *s, TCGReg d, TCGReg xj, uint32_t uk2)
+{
+    tcg_out32(s, encode_dxjuk2_insn(OPC_XVPICKVE2GR_D, d, xj, uk2));
+}
+
+/* Emits the `xvpickve2gr.wu d, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvpickve2gr_wu(TCGContext *s, TCGReg d, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_dxjuk3_insn(OPC_XVPICKVE2GR_WU, d, xj, uk3));
+}
+
+/* Emits the `xvpickve2gr.du d, xj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvpickve2gr_du(TCGContext *s, TCGReg d, TCGReg xj, uint32_t uk2)
+{
+    tcg_out32(s, encode_dxjuk2_insn(OPC_XVPICKVE2GR_DU, d, xj, uk2));
+}
+
+/* Emits the `xvrepl128vei.b xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrepl128vei_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVREPL128VEI_B, xd, xj, uk4));
+}
+
+/* Emits the `xvrepl128vei.h xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrepl128vei_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVREPL128VEI_H, xd, xj, uk3));
+}
+
+/* Emits the `xvrepl128vei.w xd, xj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrepl128vei_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk2)
+{
+    tcg_out32(s, encode_xdxjuk2_insn(OPC_XVREPL128VEI_W, xd, xj, uk2));
+}
+
+/* Emits the `xvrepl128vei.d xd, xj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvrepl128vei_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk1)
+{
+    tcg_out32(s, encode_xdxjuk1_insn(OPC_XVREPL128VEI_D, xd, xj, uk1));
+}
+
+/* Emits the `xvreplve0.b xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve0_b(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVREPLVE0_B, xd, xj));
+}
+
+/* Emits the `xvreplve0.h xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve0_h(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVREPLVE0_H, xd, xj));
+}
+
+/* Emits the `xvreplve0.w xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve0_w(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVREPLVE0_W, xd, xj));
+}
+
+/* Emits the `xvreplve0.d xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve0_d(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVREPLVE0_D, xd, xj));
+}
+
+/* Emits the `xvreplve0.q xd, xj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvreplve0_q(TCGContext *s, TCGReg xd, TCGReg xj)
+{
+    tcg_out32(s, encode_xdxj_insn(OPC_XVREPLVE0_Q, xd, xj));
+}
+
+/* Emits the `xvbitclri.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitclri_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVBITCLRI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvbitclri.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitclri_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVBITCLRI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvbitclri.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitclri_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVBITCLRI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvbitclri.d xd, xj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitclri_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
+{
+    tcg_out32(s, encode_xdxjuk6_insn(OPC_XVBITCLRI_D, xd, xj, uk6));
+}
+
+/* Emits the `xvbitseti.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitseti_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVBITSETI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvbitseti.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitseti_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVBITSETI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvbitseti.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitseti_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVBITSETI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvbitseti.d xd, xj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitseti_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
+{
+    tcg_out32(s, encode_xdxjuk6_insn(OPC_XVBITSETI_D, xd, xj, uk6));
+}
+
+/* Emits the `xvbitrevi.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitrevi_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVBITREVI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvbitrevi.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitrevi_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVBITREVI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvbitrevi.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitrevi_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVBITREVI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvbitrevi.d xd, xj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvbitrevi_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
+{
+    tcg_out32(s, encode_xdxjuk6_insn(OPC_XVBITREVI_D, xd, xj, uk6));
+}
+
+/* Emits the `xvslli.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslli_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVSLLI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvslli.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslli_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVSLLI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvslli.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslli_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSLLI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvslli.d xd, xj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvslli_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
+{
+    tcg_out32(s, encode_xdxjuk6_insn(OPC_XVSLLI_D, xd, xj, uk6));
+}
+
+/* Emits the `xvsrli.b xd, xj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrli_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
+{
+    tcg_out32(s, encode_xdxjuk3_insn(OPC_XVSRLI_B, xd, xj, uk3));
+}
+
+/* Emits the `xvsrli.h xd, xj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrli_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
+{
+    tcg_out32(s, encode_xdxjuk4_insn(OPC_XVSRLI_H, xd, xj, uk4));
+}
+
+/* Emits the `xvsrli.w xd, xj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xvsrli_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
+{
+    tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSRLI_W, xd, xj, uk5));
+}
+
+/* Emits the `xvsrli.d xd, xj, uk6` instruction. */
6494
+static void __attribute__((unused))
6495
+tcg_out_opc_xvsrli_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
6496
+{
6497
+ tcg_out32(s, encode_xdxjuk6_insn(OPC_XVSRLI_D, xd, xj, uk6));
6498
+}
6499
+
6500
+/* Emits the `xvsrai.b xd, xj, uk3` instruction. */
6501
+static void __attribute__((unused))
6502
+tcg_out_opc_xvsrai_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk3)
6503
+{
6504
+ tcg_out32(s, encode_xdxjuk3_insn(OPC_XVSRAI_B, xd, xj, uk3));
6505
+}
6506
+
6507
+/* Emits the `xvsrai.h xd, xj, uk4` instruction. */
6508
+static void __attribute__((unused))
6509
+tcg_out_opc_xvsrai_h(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk4)
6510
+{
6511
+ tcg_out32(s, encode_xdxjuk4_insn(OPC_XVSRAI_H, xd, xj, uk4));
6512
+}
6513
+
6514
+/* Emits the `xvsrai.w xd, xj, uk5` instruction. */
6515
+static void __attribute__((unused))
6516
+tcg_out_opc_xvsrai_w(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk5)
6517
+{
6518
+ tcg_out32(s, encode_xdxjuk5_insn(OPC_XVSRAI_W, xd, xj, uk5));
6519
+}
6520
+
6521
+/* Emits the `xvsrai.d xd, xj, uk6` instruction. */
6522
+static void __attribute__((unused))
6523
+tcg_out_opc_xvsrai_d(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk6)
6524
+{
6525
+ tcg_out32(s, encode_xdxjuk6_insn(OPC_XVSRAI_D, xd, xj, uk6));
6526
+}
6527
+
6528
+/* Emits the `xvbitseli.b xd, xj, uk8` instruction. */
6529
+static void __attribute__((unused))
6530
+tcg_out_opc_xvbitseli_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk8)
6531
+{
6532
+ tcg_out32(s, encode_xdxjuk8_insn(OPC_XVBITSELI_B, xd, xj, uk8));
6533
+}
6534
+
6535
+/* Emits the `xvandi.b xd, xj, uk8` instruction. */
6536
+static void __attribute__((unused))
6537
+tcg_out_opc_xvandi_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk8)
6538
+{
6539
+ tcg_out32(s, encode_xdxjuk8_insn(OPC_XVANDI_B, xd, xj, uk8));
6540
+}
6541
+
6542
+/* Emits the `xvori.b xd, xj, uk8` instruction. */
6543
+static void __attribute__((unused))
6544
+tcg_out_opc_xvori_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk8)
6545
+{
6546
+ tcg_out32(s, encode_xdxjuk8_insn(OPC_XVORI_B, xd, xj, uk8));
6547
+}
6548
+
6549
+/* Emits the `xvxori.b xd, xj, uk8` instruction. */
6550
+static void __attribute__((unused))
6551
+tcg_out_opc_xvxori_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk8)
6552
+{
6553
+ tcg_out32(s, encode_xdxjuk8_insn(OPC_XVXORI_B, xd, xj, uk8));
6554
+}
6555
+
6556
+/* Emits the `xvnori.b xd, xj, uk8` instruction. */
6557
+static void __attribute__((unused))
6558
+tcg_out_opc_xvnori_b(TCGContext *s, TCGReg xd, TCGReg xj, uint32_t uk8)
6559
+{
6560
+ tcg_out32(s, encode_xdxjuk8_insn(OPC_XVNORI_B, xd, xj, uk8));
6561
+}
6562
+
6563
+/* Emits the `xvldi xd, sj13` instruction. */
6564
+static void __attribute__((unused))
6565
+tcg_out_opc_xvldi(TCGContext *s, TCGReg xd, int32_t sj13)
6566
+{
6567
+ tcg_out32(s, encode_xdsj13_insn(OPC_XVLDI, xd, sj13));
6568
}
 /* End of generated code. */
--
2.34.1
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
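
As an illustration only (a sketch, not part of this patch; a, b, c and
st are hypothetical values), the scale argument folds a power-of-two
scaling into the fused operation so that only one rounding step remains:

    /* Before: round (a * b + c), then scale as a second step, which can
       round again (or raise flags twice) on overflow/underflow. */
    float64 r0 = float64_scalbn(float64_muladd(a, b, c, 0, &st), -3, &st);
    /* After: a single rounding of (a * b + c) * 2**-3. */
    float64 r1 = float64_muladd_scalbn(a, b, c, -3, 0, &st);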
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 7 ++++++-
7
include/fpu/softfloat.h | 6 ++++
5
1 file changed, 6 insertions(+), 1 deletion(-)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
6
9
fpu/softfloat-parts.c.inc | 7 +++--
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
8
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
14
--- a/include/fpu/softfloat.h
10
+++ b/tcg/loongarch64/tcg-target.c.inc
15
+++ b/include/fpu/softfloat.h
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
12
int64_t value = sextract64(v64, 0, 8 << vece);
17
float16 float16_sub(float16, float16, float_status *status);
13
if (-0x200 <= value && value <= 0x1FF) {
18
float16 float16_mul(float16, float16, float_status *status);
14
uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
15
- tcg_out_opc_vldi(s, rd, imm);
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
16
+
100
+
17
+ if (type == TCG_TYPE_V256) {
101
+float32 QEMU_SOFTFLOAT_ATTR
18
+ tcg_out_opc_xvldi(s, rd, imm);
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
19
+ } else {
103
+ int scale, int flags, float_status *status)
20
+ tcg_out_opc_vldi(s, rd, imm);
104
{
21
+ }
105
FloatParts64 pa, pb, pc, *pr;
22
return;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
23
}
187
}
24
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
25
--
2.34.1
--
2.43.0
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
Use the scalbn interface instead of float_muladd_halve_result.
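
As a sketch of the equivalence this relies on (illustrative, mirroring
the diff below):

    /* Both forms round (a * b + 3.0) * 2**-1 exactly once. */
    float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
    float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);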
2
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/loongarch64/tcg-target.c.inc | 22 ++++++----------------
6
target/arm/tcg/helper-a64.c | 6 +++---
6
1 file changed, 6 insertions(+), 16 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
7
8
8
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/loongarch64/tcg-target.c.inc
11
--- a/target/arm/tcg/helper-a64.c
11
+++ b/tcg/loongarch64/tcg-target.c.inc
12
+++ b/target/arm/tcg/helper-a64.c
12
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
13
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
14
(float16_is_infinity(b) && float16_is_zero(a))) {
14
TCGReg rd, TCGReg rs)
15
return float16_one_point_five;
15
{
16
}
16
- switch (vece) {
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
17
- case MO_8:
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
18
- tcg_out_opc_vreplgr2vr_b(s, rd, rs);
19
- break;
20
- case MO_16:
21
- tcg_out_opc_vreplgr2vr_h(s, rd, rs);
22
- break;
23
- case MO_32:
24
- tcg_out_opc_vreplgr2vr_w(s, rd, rs);
25
- break;
26
- case MO_64:
27
- tcg_out_opc_vreplgr2vr_d(s, rd, rs);
28
- break;
29
- default:
30
- g_assert_not_reached();
31
- }
32
+ static const LoongArchInsn repl_insn[4] = {
33
+ OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H, OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D
34
+ };
35
+
36
+ tcg_debug_assert(vece <= MO_64);
37
+ tcg_out32(s, encode_vdj_insn(repl_insn[vece], rd, rs));
38
return true;
39
}
19
}
40
20
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
23
(float32_is_infinity(b) && float32_is_zero(a))) {
24
return float32_one_point_five;
25
}
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
28
}
29
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
33
return float64_one_point_five;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
41
--
40
--
42
2.34.1
41
2.43.0
43
42
44
43
1
Use TCG_VEC_TMP0 directly.
1
Use the scalbn interface instead of float_muladd_halve_result.
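
Sketch of the revised helper contract (illustrative): the added s32
argument is a scalbn exponent, so the fhadd/fhsub flavours below pass -1
and drop float_muladd_halve_result, computing for example

    /* (1.0 * src1 + src2) * 2**-1 with a single rounding */
    helper_fmadds(env, float32_one, src1, src2, -1, 0);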
2
2
3
Reviewed-by: Song Gao <gaosong@loongson.cn>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/loongarch64/tcg-target.c.inc | 9 ++++-----
6
target/sparc/helper.h | 4 +-
8
1 file changed, 4 insertions(+), 5 deletions(-)
7
target/sparc/fop_helper.c | 8 ++--
9
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
10
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/loongarch64/tcg-target.c.inc
13
--- a/target/sparc/helper.h
13
+++ b/tcg/loongarch64/tcg-target.c.inc
14
+++ b/target/sparc/helper.h
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
15
{
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
TCGType type = vecl + TCG_TYPE_V64;
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
17
TCGArg a0, a1, a2, a3;
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
18
- TCGReg temp_vec = TCG_VEC_TMP0;
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
19
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
20
static const LoongArchInsn cmp_vec_insn[16][4] = {
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
21
[TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
23
23
* dupi_vec temp, a2
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
24
* cmp_vec a0, a1, temp, cond
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
25
*/
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
26
- tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
27
- a2 = temp_vec;
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
28
+ tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
29
+ a2 = TCG_VEC_TMP0;
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
30
}
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
31
32
32
insn = cmp_vec_insn[cond][vece];
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
34
index XXXXXXX..XXXXXXX 100644
34
break;
35
--- a/target/sparc/fop_helper.c
35
case INDEX_op_rotlv_vec:
36
+++ b/target/sparc/fop_helper.c
36
/* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
37
- tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
38
}
38
- a2 = temp_vec;
39
39
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], TCG_VEC_TMP0, a2));
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
40
+ a2 = TCG_VEC_TMP0;
41
- float32 s2, float32 s3, uint32_t op)
41
/* fall through */
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
42
case INDEX_op_rotrv_vec:
43
{
43
insn = rotrv_vec_insn[vece];
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
44
--
205
--
45
2.34.1
206
2.43.0
46
207
47
208
1
Reviewed-by: Song Gao <gaosong@loongson.cn>
1
All uses have been converted to float*_muladd_scalbn.
2
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/loongarch64/tcg-target.h | 2 +-
6
include/fpu/softfloat.h | 3 ---
6
tcg/loongarch64/tcg-target.c.inc | 3 +++
7
fpu/softfloat.c | 6 ------
7
2 files changed, 4 insertions(+), 1 deletion(-)
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
8
10
9
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/loongarch64/tcg-target.h
13
--- a/include/fpu/softfloat.h
12
+++ b/tcg/loongarch64/tcg-target.h
14
+++ b/include/fpu/softfloat.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum {
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
14
16
| Using these differs from negating an input or output before calling
15
#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
17
| the muladd function in that this means that a NaN doesn't have its
16
#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
18
| sign bit inverted before it is propagated.
17
-#define TCG_TARGET_HAS_v256 0
19
-| We also support halving the result before rounding, as a special
18
+#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX)
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
19
21
*----------------------------------------------------------------------------*/
20
#define TCG_TARGET_HAS_not_vec 1
22
enum {
21
#define TCG_TARGET_HAS_neg_vec 1
23
float_muladd_negate_c = 1,
22
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
24
float_muladd_negate_product = 2,
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
27
};
28
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
23
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/loongarch64/tcg-target.c.inc
32
--- a/fpu/softfloat.c
25
+++ b/tcg/loongarch64/tcg-target.c.inc
33
+++ b/fpu/softfloat.c
26
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
27
if (cpuinfo & CPUINFO_LSX) {
35
if (unlikely(!can_use_fpu(s))) {
28
tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
36
goto soft;
29
tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
37
}
30
+ if (cpuinfo & CPUINFO_LASX) {
38
- if (unlikely(flags & float_muladd_halve_result)) {
31
+ tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
39
- goto soft;
32
+ }
40
- }
33
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
41
34
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
35
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
36
--
2.34.1
--
2.43.0
This rounding mode is used by Hexagon.
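
Sketch of the behaviour (hypothetical caller; values are illustrative):

    set_float_rounding_mode(float_round_nearest_even_max, &st);
    /* 2**127 * 2**1 overflows; instead of returning float32_infinity,
       the result saturates to the largest finite value, 0x7f7fffff. */
    float32 r = float32_scalbn(make_float32(0x7f000000), 1, &st);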

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat-types.h | 2 ++
 fpu/softfloat-parts.c.inc | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
     float_round_to_odd = 5,
     /* Not an IEEE rounding mode: round to closest odd, overflow to inf */
     float_round_to_odd_inf = 6,
+    /* Not an IEEE rounding mode: round to nearest even, overflow to max */
+    float_round_nearest_even_max = 7,
 } FloatRoundMode;

 /*
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
     int exp, flags = 0;

     switch (s->float_rounding_mode) {
+    case float_round_nearest_even_max:
+        overflow_norm = true;
+        /* fall through */
     case float_round_nearest_even:
         if (N > 64 && frac_lsb == 0) {
             inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
--
2.43.0

Certain Hexagon instructions suppress changes to the result
when the product of fma() is a true zero.
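
Sketch of the flag's effect (hypothetical values, not from the patch):

    /* With a == +0.0f, b any finite value, c == -0.0f: */
    float32_muladd(a, b, c, 0, &st);
        /* -> +0.0: the zero product and c are added, (+0) + (-0) == +0 */
    float32_muladd(a, b, c, float_muladd_suppress_add_product_zero, &st);
        /* -> -0.0: c is returned unchanged, preserving its sign */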

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h | 5 +++++
 fpu/softfloat.c | 3 +++
 fpu/softfloat-parts.c.inc | 4 +++-
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
 | Using these differs from negating an input or output before calling
 | the muladd function in that this means that a NaN doesn't have its
 | sign bit inverted before it is propagated.
+|
+| With float_muladd_suppress_add_product_zero, if A or B is zero
+| such that the product is a true zero, then return C without addition.
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
 *----------------------------------------------------------------------------*/
 enum {
     float_muladd_negate_c = 1,
     float_muladd_negate_product = 2,
     float_muladd_negate_result = 4,
+    float_muladd_suppress_add_product_zero = 8,
 };

 /*----------------------------------------------------------------------------
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
     if (unlikely(!can_use_fpu(s))) {
         goto soft;
     }
+    if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
+        goto soft;
+    }

     float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
     if (unlikely(!f32_is_zon3(ua, ub, uc))) {
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
         goto return_normal;
     }
     if (c->cls == float_class_zero) {
-        if (a->sign != c->sign) {
+        if (flags & float_muladd_suppress_add_product_zero) {
+            a->sign = c->sign;
+        } else if (a->sign != c->sign) {
             goto return_sub_zero;
         }
         goto return_zero;
--
2.43.0

There are no special cases for this instruction.
Remove internal_mpyf as unused.
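
As a sketch of why this is safe (reasoning, not patch content):
internal_mpyf computed a plain product, deferring to float32_mul when
either input was zero, so its sole caller reduces to

    RdV = float32_mul(RsV, RtV, &env->fp_status);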

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.h | 1 -
 target/hexagon/fma_emu.c | 8 --------
 target/hexagon/op_helper.c | 2 +-
 3 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.h
+++ b/target/hexagon/fma_emu.h
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
 float32 infinite_float32(uint8_t sign);
 float32 internal_fmafx(float32 a, float32 b, float32 c,
                        int scale, float_status *fp_status);
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status);
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
     return accum_round_float32(result, fp_status);
 }

-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
-{
-    if (float32_is_zero(a) || float32_is_zero(b)) {
-        return float32_mul(a, b, fp_status);
-    }
-    return internal_fmafx(a, b, float32_zero, 0, fp_status);
-}
-
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status)
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
 {
     float32 RdV;
     arch_fpop_start(env);
-    RdV = internal_mpyf(RsV, RtV, &env->fp_status);
+    RdV = float32_mul(RsV, RtV, &env->fp_status);
     arch_fpop_end(env);
     return RdV;
 }
--
2.43.0

There are no special cases for this instruction.
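
Sketch of the rewrite (names as in the diff below): sffma is a plain
IEEE fused multiply-add, so the generic softfloat routine suffices:

    RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);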

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
                       float32 RsV, float32 RtV)
 {
     arch_fpop_start(env);
-    RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
+    RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0

There are no special cases for this instruction. Since Hexagon
always uses default-nan mode, explicitly negating the first
input is unnecessary. Use float_muladd_negate_product instead.
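
The equivalence being relied on, as a sketch (it holds because NaN
results come out as the default NaN, so a negated NaN input cannot
leak through):

    float32_muladd(float32_chs(a), b, c, 0, &st);
    float32_muladd(a, b, c, float_muladd_negate_product, &st);
    /* Both compute -(a * b) + c; only NaN sign handling could differ. */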

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
 float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
                       float32 RsV, float32 RtV)
 {
-    float32 neg_RsV;
     arch_fpop_start(env);
-    neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
-    RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
+    RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
+                         &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0

This instruction has a special case that 0 * x + c returns c
without the normal sign folding that comes with 0 + -0.
Use the new float_muladd_suppress_add_product_zero to
describe this.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
 float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
                          float32 RsV, float32 RtV, float32 PuV)
 {
-    size4s_t tmp;
     arch_fpop_start(env);
-    RxV = check_nan(RxV, RxV, &env->fp_status);
-    RxV = check_nan(RxV, RsV, &env->fp_status);
-    RxV = check_nan(RxV, RtV, &env->fp_status);
-    tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
-    if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
-        RxV = tmp;
-    }
+    RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
+                                float_muladd_suppress_add_product_zero,
+                                &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0
1
Split out host/bufferiszero.c.inc for x86, aarch64 and generic
1
There are multiple special cases for this instruction.
2
in order to avoid an overlong ifdef ladder.
2
(1) Saturating to the normal maximum, instead of overflowing to
infinity, is handled by the new float_round_nearest_even_max
rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
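
Sketch of how case (3) is caught (mirroring do_sffma_lib below):

    set_float_exception_flags(0, &env->fp_status);
    RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
    if (get_float_exception_flags(&env->fp_status) & float_flag_invalid_isi) {
        RxV = 0;    /* Inf - Inf yields 0 on Hexagon */
    }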
3
8
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
11
---
7
util/bufferiszero.c | 191 +------------------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
8
host/include/aarch64/host/bufferiszero.c.inc | 76 ++++++++
13
1 file changed, 26 insertions(+), 79 deletions(-)
9
host/include/generic/host/bufferiszero.c.inc | 10 +
10
host/include/i386/host/bufferiszero.c.inc | 124 ++++++++++++
11
host/include/x86_64/host/bufferiszero.c.inc | 1 +
12
5 files changed, 212 insertions(+), 190 deletions(-)
13
create mode 100644 host/include/aarch64/host/bufferiszero.c.inc
14
create mode 100644 host/include/generic/host/bufferiszero.c.inc
15
create mode 100644 host/include/i386/host/bufferiszero.c.inc
16
create mode 100644 host/include/x86_64/host/bufferiszero.c.inc
17
14
18
diff --git a/util/bufferiszero.c b/util/bufferiszero.c
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/util/bufferiszero.c
17
--- a/target/hexagon/op_helper.c
21
+++ b/util/bufferiszero.c
18
+++ b/target/hexagon/op_helper.c
22
@@ -XXX,XX +XXX,XX @@ static bool buffer_is_zero_int_ge256(const void *buf, size_t len)
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
23
return t == 0;
20
return RxV;
24
}
21
}
25
22
26
-#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
23
-static bool is_zero_prod(float32 a, float32 b)
27
-#include <immintrin.h>
28
-
29
-/* Helper for preventing the compiler from reassociating
30
- chains of binary vector operations. */
31
-#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))
32
-
33
-/* Note that these vectorized functions may assume len >= 256. */
34
-
35
-static bool __attribute__((target("sse2")))
36
-buffer_zero_sse2(const void *buf, size_t len)
37
-{
24
-{
38
- /* Unaligned loads at head/tail. */
25
- return ((float32_is_zero(a) && is_finite(b)) ||
39
- __m128i v = *(__m128i_u *)(buf);
26
- (float32_is_zero(b) && is_finite(a)));
40
- __m128i w = *(__m128i_u *)(buf + len - 16);
41
- /* Align head/tail to 16-byte boundaries. */
42
- const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
43
- const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
44
- __m128i zero = { 0 };
45
-
46
- /* Collect a partial block at tail end. */
47
- v |= e[-1]; w |= e[-2];
48
- SSE_REASSOC_BARRIER(v, w);
49
- v |= e[-3]; w |= e[-4];
50
- SSE_REASSOC_BARRIER(v, w);
51
- v |= e[-5]; w |= e[-6];
52
- SSE_REASSOC_BARRIER(v, w);
53
- v |= e[-7]; v |= w;
54
-
55
- /*
56
- * Loop over complete 128-byte blocks.
57
- * With the head and tail removed, e - p >= 14, so the loop
58
- * must iterate at least once.
59
- */
60
- do {
61
- v = _mm_cmpeq_epi8(v, zero);
62
- if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
63
- return false;
64
- }
65
- v = p[0]; w = p[1];
66
- SSE_REASSOC_BARRIER(v, w);
67
- v |= p[2]; w |= p[3];
68
- SSE_REASSOC_BARRIER(v, w);
69
- v |= p[4]; w |= p[5];
70
- SSE_REASSOC_BARRIER(v, w);
71
- v |= p[6]; w |= p[7];
72
- SSE_REASSOC_BARRIER(v, w);
73
- v |= w;
74
- p += 8;
75
- } while (p < e - 7);
76
-
77
- return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
78
-}
27
-}
79
-
28
-
80
-#ifdef CONFIG_AVX2_OPT
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
81
-static bool __attribute__((target("avx2")))
82
-buffer_zero_avx2(const void *buf, size_t len)
83
-{
30
-{
84
- /* Unaligned loads at head/tail. */
31
- float32 ret = dst;
85
- __m256i v = *(__m256i_u *)(buf);
32
- if (float32_is_any_nan(x)) {
86
- __m256i w = *(__m256i_u *)(buf + len - 32);
33
- if (extract32(x, 22, 1) == 0) {
87
- /* Align head/tail to 32-byte boundaries. */
34
- float_raise(float_flag_invalid, fp_status);
88
- const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
89
- const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
90
- __m256i zero = { 0 };
91
-
92
- /* Collect a partial block at tail end. */
93
- v |= e[-1]; w |= e[-2];
94
- SSE_REASSOC_BARRIER(v, w);
95
- v |= e[-3]; w |= e[-4];
96
- SSE_REASSOC_BARRIER(v, w);
97
- v |= e[-5]; w |= e[-6];
98
- SSE_REASSOC_BARRIER(v, w);
99
- v |= e[-7]; v |= w;
100
-
101
- /* Loop over complete 256-byte blocks. */
102
- for (; p < e - 7; p += 8) {
103
- /* PTEST is not profitable here. */
104
- v = _mm256_cmpeq_epi8(v, zero);
105
- if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
106
- return false;
107
- }
35
- }
108
- v = p[0]; w = p[1];
36
- ret = make_float32(0xffffffff); /* nan */
109
- SSE_REASSOC_BARRIER(v, w);
110
- v |= p[2]; w |= p[3];
111
- SSE_REASSOC_BARRIER(v, w);
112
- v |= p[4]; w |= p[5];
113
- SSE_REASSOC_BARRIER(v, w);
114
- v |= p[6]; w |= p[7];
115
- SSE_REASSOC_BARRIER(v, w);
116
- v |= w;
117
- }
37
- }
118
-
38
- return ret;
119
- return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
120
-}
121
-#endif /* CONFIG_AVX2_OPT */
122
-
123
-static biz_accel_fn const accel_table[] = {
124
- buffer_is_zero_int_ge256,
125
- buffer_zero_sse2,
126
-#ifdef CONFIG_AVX2_OPT
127
- buffer_zero_avx2,
128
-#endif
129
-};
130
-
131
-static unsigned best_accel(void)
132
-{
133
-#ifdef CONFIG_AVX2_OPT
134
- unsigned info = cpuinfo_init();
135
-
136
- if (info & CPUINFO_AVX2) {
137
- return 2;
138
- }
139
-#endif
140
- return 1;
141
-}
39
-}
142
-
40
-
143
-#elif defined(__aarch64__) && defined(__ARM_NEON)
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
144
-#include <arm_neon.h>
42
float32 RsV, float32 RtV, float32 PuV)
145
-
43
{
146
-/*
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
147
- * Helper for preventing the compiler from reassociating
45
return RxV;
148
- * chains of binary vector operations.
46
}
149
- */
47
150
-#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))
48
-static bool is_inf_prod(int32_t a, int32_t b)
151
-
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
152
-static bool buffer_is_zero_simd(const void *buf, size_t len)
50
+ float32 RsV, float32 RtV, int negate)
153
-{
51
{
154
- uint32x4_t t0, t1, t2, t3;
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
155
-
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
156
- /* Align head/tail to 16-byte boundaries. */
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
157
- const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
55
+ int flags;
158
- const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
159
-
160
- /* Unaligned loads at head/tail. */
161
- t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);
162
-
163
- /* Collect a partial block at tail end. */
164
- t1 = e[-7] | e[-6];
165
- t2 = e[-5] | e[-4];
166
- t3 = e[-3] | e[-2];
167
- t0 |= e[-1];
168
- REASSOC_BARRIER(t0, t1);
169
- REASSOC_BARRIER(t2, t3);
170
- t0 |= t1;
171
- t2 |= t3;
172
- REASSOC_BARRIER(t0, t2);
173
- t0 |= t2;
174
-
175
- /*
176
- * Loop over complete 128-byte blocks.
177
- * With the head and tail removed, e - p >= 14, so the loop
178
- * must iterate at least once.
179
- */
180
- do {
181
- /*
182
- * Reduce via UMAXV. Whatever the actual result,
183
- * it will only be zero if all input bytes are zero.
184
- */
185
- if (unlikely(vmaxvq_u32(t0) != 0)) {
186
- return false;
187
- }
188
-
189
- t0 = p[0] | p[1];
190
- t1 = p[2] | p[3];
191
- t2 = p[4] | p[5];
192
- t3 = p[6] | p[7];
193
- REASSOC_BARRIER(t0, t1);
194
- REASSOC_BARRIER(t2, t3);
195
- t0 |= t1;
196
- t2 |= t3;
197
- REASSOC_BARRIER(t0, t2);
198
- t0 |= t2;
199
- p += 8;
200
- } while (p < e - 7);
201
-
202
- return vmaxvq_u32(t0) == 0;
203
-}
204
-
205
-#define best_accel() 1
206
-static biz_accel_fn const accel_table[] = {
207
- buffer_is_zero_int_ge256,
208
- buffer_is_zero_simd,
209
-};
210
-#else
211
-#define best_accel() 0
212
-static biz_accel_fn const accel_table[1] = {
213
- buffer_is_zero_int_ge256
214
-};
215
-#endif
216
+#include "host/bufferiszero.c.inc"
217
218
static biz_accel_fn buffer_is_zero_accel;
219
static unsigned accel_index;
220
diff --git a/host/include/aarch64/host/bufferiszero.c.inc b/host/include/aarch64/host/bufferiszero.c.inc
221
new file mode 100644
222
index XXXXXXX..XXXXXXX
223
--- /dev/null
224
+++ b/host/include/aarch64/host/bufferiszero.c.inc
225
@@ -XXX,XX +XXX,XX @@
226
+/*
227
+ * SPDX-License-Identifier: GPL-2.0-or-later
228
+ * buffer_is_zero acceleration, aarch64 version.
229
+ */
230
+
56
+
231
+#ifdef __ARM_NEON
57
+ arch_fpop_start(env);
232
+#include <arm_neon.h>
233
+
58
+
234
+/*
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
235
+ * Helper for preventing the compiler from reassociating
60
+ RxV = float32_muladd(RsV, RtV, RxV,
236
+ * chains of binary vector operations.
61
+ negate | float_muladd_suppress_add_product_zero,
237
+ */
62
+#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))
+
+static bool buffer_is_zero_simd(const void *buf, size_t len)
+{
+    uint32x4_t t0, t1, t2, t3;
+
+    /* Align head/tail to 16-byte boundaries.  */
+    const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+    const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+
+    /* Unaligned loads at head/tail.  */
+    t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);
+
+    /* Collect a partial block at tail end.  */
+    t1 = e[-7] | e[-6];
+    t2 = e[-5] | e[-4];
+    t3 = e[-3] | e[-2];
+    t0 |= e[-1];
+    REASSOC_BARRIER(t0, t1);
+    REASSOC_BARRIER(t2, t3);
+    t0 |= t1;
+    t2 |= t3;
+    REASSOC_BARRIER(t0, t2);
+    t0 |= t2;
+
+    /*
+     * Loop over complete 128-byte blocks.
+     * With the head and tail removed, e - p >= 14, so the loop
+     * must iterate at least once.
+     */
+    do {
+        /*
+         * Reduce via UMAXV.  Whatever the actual result,
+         * it will only be zero if all input bytes are zero.
+         */
+        if (unlikely(vmaxvq_u32(t0) != 0)) {
+            return false;
+        }
+
+        t0 = p[0] | p[1];
+        t1 = p[2] | p[3];
+        t2 = p[4] | p[5];
+        t3 = p[6] | p[7];
+        REASSOC_BARRIER(t0, t1);
+        REASSOC_BARRIER(t2, t3);
+        t0 |= t1;
+        t2 |= t3;
+        REASSOC_BARRIER(t0, t2);
+        t0 |= t2;
+        p += 8;
+    } while (p < e - 7);
+
+    return vmaxvq_u32(t0) == 0;
+}
+
+static biz_accel_fn const accel_table[] = {
+    buffer_is_zero_int_ge256,
+    buffer_is_zero_simd,
+};
+
+#define best_accel() 1
+#else
+# include "host/include/generic/host/bufferiszero.c.inc"
+#endif
diff --git a/host/include/generic/host/bufferiszero.c.inc b/host/include/generic/host/bufferiszero.c.inc
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/host/include/generic/host/bufferiszero.c.inc
@@ -XXX,XX +XXX,XX @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * buffer_is_zero acceleration, generic version.
+ */
+
+static biz_accel_fn const accel_table[1] = {
+    buffer_is_zero_int_ge256
+};
+
+#define best_accel() 0
diff --git a/host/include/i386/host/bufferiszero.c.inc b/host/include/i386/host/bufferiszero.c.inc
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/host/include/i386/host/bufferiszero.c.inc
@@ -XXX,XX +XXX,XX @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * buffer_is_zero acceleration, x86 version.
+ */
+
+#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
+#include <immintrin.h>
+
+/* Helper for preventing the compiler from reassociating
+   chains of binary vector operations.  */
+#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))
+
+/* Note that these vectorized functions may assume len >= 256.  */
+
+static bool __attribute__((target("sse2")))
+buffer_zero_sse2(const void *buf, size_t len)
+{
+    /* Unaligned loads at head/tail.  */
+    __m128i v = *(__m128i_u *)(buf);
+    __m128i w = *(__m128i_u *)(buf + len - 16);
+    /* Align head/tail to 16-byte boundaries.  */
+    const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+    const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+    __m128i zero = { 0 };
+
+    /* Collect a partial block at tail end.  */
+    v |= e[-1]; w |= e[-2];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-3]; w |= e[-4];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-5]; w |= e[-6];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-7]; v |= w;
+
+    /*
+     * Loop over complete 128-byte blocks.
+     * With the head and tail removed, e - p >= 14, so the loop
+     * must iterate at least once.
+     */
+    do {
+        v = _mm_cmpeq_epi8(v, zero);
+        if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
+            return false;
+        }
+        v = p[0]; w = p[1];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[2]; w |= p[3];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[4]; w |= p[5];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[6]; w |= p[7];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= w;
+        p += 8;
+    } while (p < e - 7);
+
+    return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
+}
+
+#ifdef CONFIG_AVX2_OPT
+static bool __attribute__((target("avx2")))
+buffer_zero_avx2(const void *buf, size_t len)
+{
+    /* Unaligned loads at head/tail.  */
+    __m256i v = *(__m256i_u *)(buf);
+    __m256i w = *(__m256i_u *)(buf + len - 32);
+    /* Align head/tail to 32-byte boundaries.  */
+    const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
+    const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
+    __m256i zero = { 0 };
+
+    /* Collect a partial block at tail end.  */
+    v |= e[-1]; w |= e[-2];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-3]; w |= e[-4];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-5]; w |= e[-6];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-7]; v |= w;
+
+    /* Loop over complete 256-byte blocks.  */
+    for (; p < e - 7; p += 8) {
+        /* PTEST is not profitable here.  */
+        v = _mm256_cmpeq_epi8(v, zero);
+        if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
+            return false;
+        }
+        v = p[0]; w = p[1];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[2]; w |= p[3];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[4]; w |= p[5];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[6]; w |= p[7];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= w;
+    }
+
+    return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
+}
+#endif /* CONFIG_AVX2_OPT */
+
+static biz_accel_fn const accel_table[] = {
+    buffer_is_zero_int_ge256,
+    buffer_zero_sse2,
+#ifdef CONFIG_AVX2_OPT
+    buffer_zero_avx2,
+#endif
+};
+
+static unsigned best_accel(void)
+{
+#ifdef CONFIG_AVX2_OPT
+    unsigned info = cpuinfo_init();
+    if (info & CPUINFO_AVX2) {
+        return 2;
+    }
+#endif
+    return 1;
+}
+
+#else
+# include "host/include/generic/host/bufferiszero.c.inc"
+#endif
diff --git a/host/include/x86_64/host/bufferiszero.c.inc b/host/include/x86_64/host/bufferiszero.c.inc
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/host/include/x86_64/host/bufferiszero.c.inc
@@ -0,0 +1 @@
+#include "host/include/i386/host/bufferiszero.c.inc"
--
2.34.1

+                                   &env->fp_status);
+
+    flags = get_float_exception_flags(&env->fp_status);
+    if (flags) {
+        /* Flags are suppressed by this instruction. */
+        set_float_exception_flags(0, &env->fp_status);
+
+        /* Return 0 for Inf - Inf. */
+        if (flags & float_flag_invalid_isi) {
+            RxV = 0;
+        }
+    }
+
+    arch_fpop_end(env);
+    return RxV;
 }

 float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
                           float32 RsV, float32 RtV)
 {
-    bool infinp;
-    bool infminusinf;
-    float32 tmp;
-
-    arch_fpop_start(env);
-    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
-    infminusinf = float32_is_infinity(RxV) &&
-                  is_inf_prod(RsV, RtV) &&
-                  (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
-    infinp = float32_is_infinity(RxV) ||
-             float32_is_infinity(RtV) ||
-             float32_is_infinity(RsV);
-    RxV = check_nan(RxV, RxV, &env->fp_status);
-    RxV = check_nan(RxV, RsV, &env->fp_status);
-    RxV = check_nan(RxV, RtV, &env->fp_status);
-    tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
-    if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
-        RxV = tmp;
-    }
-    set_float_exception_flags(0, &env->fp_status);
-    if (float32_is_infinity(RxV) && !infinp) {
-        RxV = RxV - 1;
-    }
-    if (infminusinf) {
-        RxV = 0;
-    }
-    arch_fpop_end(env);
-    return RxV;
+    return do_sffma_lib(env, RxV, RsV, RtV, 0);
 }

 float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
                           float32 RsV, float32 RtV)
 {
-    bool infinp;
-    bool infminusinf;
-    float32 tmp;
-
-    arch_fpop_start(env);
-    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
-    infminusinf = float32_is_infinity(RxV) &&
-                  is_inf_prod(RsV, RtV) &&
-                  (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
-    infinp = float32_is_infinity(RxV) ||
-             float32_is_infinity(RtV) ||
-             float32_is_infinity(RsV);
-    RxV = check_nan(RxV, RxV, &env->fp_status);
-    RxV = check_nan(RxV, RsV, &env->fp_status);
-    RxV = check_nan(RxV, RtV, &env->fp_status);
-    float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
-    tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
-    if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
-        RxV = tmp;
-    }
-    set_float_exception_flags(0, &env->fp_status);
-    if (float32_is_infinity(RxV) && !infinp) {
-        RxV = RxV - 1;
-    }
-    if (infminusinf) {
-        RxV = 0;
-    }
-    arch_fpop_end(env);
-    return RxV;
+    return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
 }

 float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
--
2.43.0
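A note on the REASSOC_BARRIER/SSE_REASSOC_BARRIER idiom used above: without it, the compiler is free to reassociate the chain of vector ORs into one serial dependency chain, which destroys the instruction-level parallelism the balanced OR tree is written to exploit. Below is a minimal standalone sketch of the idea -- an illustration, not code from the patch -- assuming GCC/Clang vector extensions on an aarch64 target:

    #include <arm_neon.h>

    /* Illustrative only.  The empty asm claims to read and write both
     * vectors, so the compiler must materialize x and y here and cannot
     * merge the two independent OR chains across this point.  "+w" is
     * the aarch64 vector-register constraint; x86 uses "+x" instead.  */
    #define BARRIER(a, b) asm("" : "+w"(a), "+w"(b))

    static uint32x4_t or_reduce4(uint32x4_t a, uint32x4_t b,
                                 uint32x4_t c, uint32x4_t d)
    {
        uint32x4_t x = a | b;   /* chain 1, independent of chain 2 */
        uint32x4_t y = c | d;   /* chain 2 */
        BARRIER(x, y);          /* pin the tree shape */
        return x | y;           /* combine only at the end */
    }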
New patch

The function is now unused.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.h |   2 -
 target/hexagon/fma_emu.c | 171 ---------------------------------------
 2 files changed, 173 deletions(-)

diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.h
+++ b/target/hexagon/fma_emu.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
 }
 int32_t float32_getexp(float32 f32);
 float32 infinite_float32(uint8_t sign);
-float32 internal_fmafx(float32 a, float32 b, float32 c,
-                       int scale, float_status *fp_status);
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status);
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
     return -1;
 }

-static uint64_t float32_getmant(float32 f32)
-{
-    Float a = { .i = f32 };
-    if (float32_is_normal(f32)) {
-        return a.mant | 1ULL << 23;
-    }
-    if (float32_is_zero(f32)) {
-        return 0;
-    }
-    if (float32_is_denormal(f32)) {
-        return a.mant;
-    }
-    return ~0ULL;
-}
-
 int32_t float32_getexp(float32 f32)
 {
     Float a = { .i = f32 };
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
 }

 /* Return a maximum finite value with the requested sign */
-static float32 maxfinite_float32(uint8_t sign)
-{
-    if (sign) {
-        return make_float32(SF_MINUS_MAXF);
-    } else {
-        return make_float32(SF_MAXF);
-    }
-}
-
-/* Return a zero value with requested sign */
-static float32 zero_float32(uint8_t sign)
-{
-    if (sign) {
-        return make_float32(0x80000000);
-    } else {
-        return float32_zero;
-    }
-}
-
 #define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
 static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
 { \
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
 }

 GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
-
-static bool is_inf_prod(float64 a, float64 b)
-{
-    return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
-            (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
-            (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
-}
-
-static float64 special_fma(float64 a, float64 b, float64 c,
-                           float_status *fp_status)
-{
-    float64 ret = make_float64(0);
-
-    /*
-     * If A multiplied by B is an exact infinity and C is also an infinity
-     * but with the opposite sign, FMA returns NaN and raises invalid.
-     */
-    uint8_t a_sign = float64_is_neg(a);
-    uint8_t b_sign = float64_is_neg(b);
-    uint8_t c_sign = float64_is_neg(c);
-    if (is_inf_prod(a, b) && float64_is_infinity(c)) {
-        if ((a_sign ^ b_sign) != c_sign) {
-            ret = make_float64(DF_NAN);
-            float_raise(float_flag_invalid, fp_status);
-            return ret;
-        }
-    }
-    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
-        (float64_is_zero(a) && float64_is_infinity(b))) {
-        ret = make_float64(DF_NAN);
-        float_raise(float_flag_invalid, fp_status);
-        return ret;
-    }
-    /*
-     * If none of the above checks are true and C is a NaN,
-     * a NaN shall be returned
-     * If A or B are NaN, a NAN shall be returned.
-     */
-    if (float64_is_any_nan(a) ||
-        float64_is_any_nan(b) ||
-        float64_is_any_nan(c)) {
-        if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        ret = make_float64(DF_NAN);
-        return ret;
-    }
-    /*
-     * We have checked for adding opposite-signed infinities.
-     * Other infinities return infinity with the correct sign
-     */
-    if (float64_is_infinity(c)) {
-        ret = infinite_float64(c_sign);
-        return ret;
-    }
-    if (float64_is_infinity(a) || float64_is_infinity(b)) {
-        ret = infinite_float64(a_sign ^ b_sign);
-        return ret;
-    }
-    g_assert_not_reached();
-}
-
-static float32 special_fmaf(float32 a, float32 b, float32 c,
-                            float_status *fp_status)
-{
-    float64 aa, bb, cc;
-    aa = float32_to_float64(a, fp_status);
-    bb = float32_to_float64(b, fp_status);
-    cc = float32_to_float64(c, fp_status);
-    return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
-}
-
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
-                       float_status *fp_status)
-{
-    Accum prod;
-    Accum acc;
-    Accum result;
-    accum_init(&prod);
-    accum_init(&acc);
-    accum_init(&result);
-
-    uint8_t a_sign = float32_is_neg(a);
-    uint8_t b_sign = float32_is_neg(b);
-    uint8_t c_sign = float32_is_neg(c);
-    if (float32_is_infinity(a) ||
-        float32_is_infinity(b) ||
-        float32_is_infinity(c)) {
-        return special_fmaf(a, b, c, fp_status);
-    }
-    if (float32_is_any_nan(a) ||
-        float32_is_any_nan(b) ||
-        float32_is_any_nan(c)) {
-        return special_fmaf(a, b, c, fp_status);
-    }
-    if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
-        float32 tmp = float32_mul(a, b, fp_status);
-        tmp = float32_add(tmp, c, fp_status);
-        return tmp;
-    }
-
-    /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
-    prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
-
-    /*
-     * Note: extracting the mantissa into an int is multiplying by
-     * 2**23, so adjust here
-     */
-    prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
-    prod.sign = a_sign ^ b_sign;
-    if (float32_is_zero(a) || float32_is_zero(b)) {
-        prod.exp = -2 * WAY_BIG_EXP;
-    }
-    if ((scale > 0) && float32_is_denormal(c)) {
-        acc.mant = int128_mul_6464(0, 0);
-        acc.exp = -WAY_BIG_EXP;
-        acc.sign = c_sign;
-        acc.sticky = 1;
-        result = accum_add(prod, acc);
-    } else if (!float32_is_zero(c)) {
-        acc.mant = int128_mul_6464(float32_getmant(c), 1);
-        acc.exp = float32_getexp(c);
-        acc.sign = c_sign;
-        result = accum_add(prod, acc);
-    } else {
-        result = prod;
-    }
-    result.exp += scale;
-    return accum_round_float32(result, fp_status);
-}

 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
--
2.43.0
New patch

This massive macro is now only used once.
Expand it for use only by float64.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
 1 file changed, 127 insertions(+), 128 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
 }

 /* Return a maximum finite value with the requested sign */
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
-{ \
-    if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
-        && ((a.guard | a.round | a.sticky) == 0)) { \
-        /* result zero */ \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_down: \
-            return zero_##SUFFIX(1); \
-        default: \
-            return zero_##SUFFIX(0); \
-        } \
-    } \
-    /* Normalize right */ \
-    /* We want MANTBITS bits of mantissa plus the leading one. */ \
-    /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
-    /* So we need to normalize right while the high word is non-zero and \
-     * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
-    while ((int128_gethi(a.mant) != 0) || \
-           ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
-        a = accum_norm_right(a, 1); \
-    } \
-    /* \
-     * OK, now normalize left \
-     * We want to normalize left until we have a leading one in bit 24 \
-     * Theoretically, we only need to shift a maximum of one to the left if we \
-     * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
-     * should be 0 \
-     */ \
-    while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
-        a = accum_norm_left(a); \
-    } \
-    /* \
-     * OK, now we might need to denormalize because of potential underflow. \
-     * We need to do this before rounding, and rounding might make us normal \
-     * again \
-     */ \
-    while (a.exp <= 0) { \
-        a = accum_norm_right(a, 1 - a.exp); \
-        /* \
-         * Do we have underflow? \
-         * That's when we get an inexact answer because we ran out of bits \
-         * in a denormal. \
-         */ \
-        if (a.guard || a.round || a.sticky) { \
-            float_raise(float_flag_underflow, fp_status); \
-        } \
-    } \
-    /* OK, we're relatively canonical... now we need to round */ \
-    if (a.guard || a.round || a.sticky) { \
-        float_raise(float_flag_inexact, fp_status); \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_to_zero: \
-            /* Chop and we're done */ \
-            break; \
-        case float_round_up: \
-            if (a.sign == 0) { \
-                a.mant = int128_add(a.mant, int128_one()); \
-            } \
-            break; \
-        case float_round_down: \
-            if (a.sign != 0) { \
-                a.mant = int128_add(a.mant, int128_one()); \
-            } \
-            break; \
-        default: \
-            if (a.round || a.sticky) { \
-                /* round up if guard is 1, down if guard is zero */ \
-                a.mant = int128_add(a.mant, int128_make64(a.guard)); \
-            } else if (a.guard) { \
-                /* exactly .5, round up if odd */ \
-                a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
-            } \
-            break; \
-        } \
-    } \
-    /* \
-     * OK, now we might have carried all the way up. \
-     * So we might need to shr once \
-     * at least we know that the lsb should be zero if we rounded and \
-     * got a carry out... \
-     */ \
-    if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
-        a = accum_norm_right(a, 1); \
-    } \
-    /* Overflow? */ \
-    if (a.exp >= INF_EXP) { \
-        /* Yep, inf result */ \
-        float_raise(float_flag_overflow, fp_status); \
-        float_raise(float_flag_inexact, fp_status); \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_to_zero: \
-            return maxfinite_##SUFFIX(a.sign); \
-        case float_round_up: \
-            if (a.sign == 0) { \
-                return infinite_##SUFFIX(a.sign); \
-            } else { \
-                return maxfinite_##SUFFIX(a.sign); \
-            } \
-        case float_round_down: \
-            if (a.sign != 0) { \
-                return infinite_##SUFFIX(a.sign); \
-            } else { \
-                return maxfinite_##SUFFIX(a.sign); \
-            } \
-        default: \
-            return infinite_##SUFFIX(a.sign); \
-        } \
-    } \
-    /* Underflow? */ \
-    if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
-        /* Leading one means: No, we're normal. So, we should be done... */ \
-        INTERNAL_TYPE ret; \
-        ret.i = 0; \
-        ret.sign = a.sign; \
-        ret.exp = a.exp; \
-        ret.mant = int128_getlo(a.mant); \
-        return ret.i; \
-    } \
-    assert(a.exp == 1); \
-    INTERNAL_TYPE ret; \
-    ret.i = 0; \
-    ret.sign = a.sign; \
-    ret.exp = 0; \
-    ret.mant = int128_getlo(a.mant); \
-    return ret.i; \
+static float64 accum_round_float64(Accum a, float_status *fp_status)
+{
+    if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
+        && ((a.guard | a.round | a.sticky) == 0)) {
+        /* result zero */
+        switch (fp_status->float_rounding_mode) {
+        case float_round_down:
+            return zero_float64(1);
+        default:
+            return zero_float64(0);
+        }
+    }
+    /*
+     * Normalize right
+     * We want DF_MANTBITS bits of mantissa plus the leading one.
+     * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
+     * So we need to normalize right while the high word is non-zero and
+     * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
+     */
+    while ((int128_gethi(a.mant) != 0) ||
+           ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
+        a = accum_norm_right(a, 1);
+    }
+    /*
+     * OK, now normalize left
+     * We want to normalize left until we have a leading one in bit 24
+     * Theoretically, we only need to shift a maximum of one to the left if we
+     * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
+     * should be 0
+     */
+    while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
+        a = accum_norm_left(a);
+    }
+    /*
+     * OK, now we might need to denormalize because of potential underflow.
+     * We need to do this before rounding, and rounding might make us normal
+     * again
+     */
+    while (a.exp <= 0) {
+        a = accum_norm_right(a, 1 - a.exp);
+        /*
+         * Do we have underflow?
+         * That's when we get an inexact answer because we ran out of bits
+         * in a denormal.
+         */
+        if (a.guard || a.round || a.sticky) {
+            float_raise(float_flag_underflow, fp_status);
+        }
+    }
+    /* OK, we're relatively canonical... now we need to round */
+    if (a.guard || a.round || a.sticky) {
+        float_raise(float_flag_inexact, fp_status);
+        switch (fp_status->float_rounding_mode) {
+        case float_round_to_zero:
+            /* Chop and we're done */
+            break;
+        case float_round_up:
+            if (a.sign == 0) {
+                a.mant = int128_add(a.mant, int128_one());
+            }
+            break;
+        case float_round_down:
+            if (a.sign != 0) {
+                a.mant = int128_add(a.mant, int128_one());
+            }
+            break;
+        default:
+            if (a.round || a.sticky) {
+                /* round up if guard is 1, down if guard is zero */
+                a.mant = int128_add(a.mant, int128_make64(a.guard));
+            } else if (a.guard) {
+                /* exactly .5, round up if odd */
+                a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
+            }
+            break;
+        }
+    }
+    /*
+     * OK, now we might have carried all the way up.
+     * So we might need to shr once
+     * at least we know that the lsb should be zero if we rounded and
+     * got a carry out...
+     */
+    if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
+        a = accum_norm_right(a, 1);
+    }
+    /* Overflow? */
+    if (a.exp >= DF_INF_EXP) {
+        /* Yep, inf result */
+        float_raise(float_flag_overflow, fp_status);
+        float_raise(float_flag_inexact, fp_status);
+        switch (fp_status->float_rounding_mode) {
+        case float_round_to_zero:
+            return maxfinite_float64(a.sign);
+        case float_round_up:
+            if (a.sign == 0) {
+                return infinite_float64(a.sign);
+            } else {
+                return maxfinite_float64(a.sign);
+            }
+        case float_round_down:
+            if (a.sign != 0) {
+                return infinite_float64(a.sign);
+            } else {
+                return maxfinite_float64(a.sign);
+            }
+        default:
+            return infinite_float64(a.sign);
+        }
+    }
+    /* Underflow? */
+    if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+        /* Leading one means: No, we're normal. So, we should be done... */
+        Double ret;
+        ret.i = 0;
+        ret.sign = a.sign;
+        ret.exp = a.exp;
+        ret.mant = int128_getlo(a.mant);
+        return ret.i;
+    }
+    assert(a.exp == 1);
+    Double ret;
+    ret.i = 0;
+    ret.sign = a.sign;
+    ret.exp = 0;
+    ret.mant = int128_getlo(a.mant);
+    return ret.i;
 }

-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status)
--
2.43.0
New patch

This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ typedef union {
     };
 } Double;

-typedef union {
-    float f;
-    uint32_t i;
-    struct {
-        uint32_t mant:23;
-        uint32_t exp:8;
-        uint32_t sign:1;
-    };
-} Float;
-
 static uint64_t float64_getmant(float64 f64)
 {
     Double a = { .i = f64 };
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)

 int32_t float32_getexp(float32 f32)
 {
-    Float a = { .i = f32 };
+    int exp = float32_getexp_raw(f32);
     if (float32_is_normal(f32)) {
-        return a.exp;
+        return exp;
     }
     if (float32_is_denormal(f32)) {
-        return a.exp + 1;
+        return exp + 1;
     }
     return -1;
 }
--
2.43.0
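The point of this change is easy to miss: C bitfield layout is implementation-defined and differs between big- and little-endian ABIs, while shift-and-mask extraction depends only on the integer value of the word. A hedged sketch of the endian-safe form (illustration only; extract32() is QEMU's helper from include/qemu/bitops.h):

    /* Illustrative only: equivalent to extract32(f32_bits, 23, 8),
     * i.e. take the 8 exponent bits starting at bit 23.  This behaves
     * identically on any host byte order, because it never depends on
     * how the compiler packs bitfields.  */
    static inline uint32_t sf_exp(uint32_t f32_bits)
    {
        return (f32_bits >> 23) & 0xff;
    }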
Reviewed-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 host/include/loongarch64/host/cpuinfo.h | 1 +
 util/cpuinfo-loongarch.c                | 1 +
 2 files changed, 2 insertions(+)

diff --git a/host/include/loongarch64/host/cpuinfo.h b/host/include/loongarch64/host/cpuinfo.h
index XXXXXXX..XXXXXXX 100644
--- a/host/include/loongarch64/host/cpuinfo.h
+++ b/host/include/loongarch64/host/cpuinfo.h
@@ -XXX,XX +XXX,XX @@

 #define CPUINFO_ALWAYS    (1u << 0)  /* so cpuinfo is nonzero */
 #define CPUINFO_LSX       (1u << 1)
+#define CPUINFO_LASX      (1u << 2)

 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/util/cpuinfo-loongarch.c b/util/cpuinfo-loongarch.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cpuinfo-loongarch.c
+++ b/util/cpuinfo-loongarch.c
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)

     info = CPUINFO_ALWAYS;
     info |= (hwcap & HWCAP_LOONGARCH_LSX ? CPUINFO_LSX : 0);
+    info |= (hwcap & HWCAP_LOONGARCH_LASX ? CPUINFO_LASX : 0);

     cpuinfo = info;
     return info;
--
2.34.1

This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@

 #define WAY_BIG_EXP 4096

-typedef union {
-    double f;
-    uint64_t i;
-    struct {
-        uint64_t mant:52;
-        uint64_t exp:11;
-        uint64_t sign:1;
-    };
-} Double;
-
 static uint64_t float64_getmant(float64 f64)
 {
-    Double a = { .i = f64 };
+    uint64_t mant = extract64(f64, 0, 52);
     if (float64_is_normal(f64)) {
-        return a.mant | 1ULL << 52;
+        return mant | 1ULL << 52;
     }
     if (float64_is_zero(f64)) {
         return 0;
     }
     if (float64_is_denormal(f64)) {
-        return a.mant;
+        return mant;
     }
     return ~0ULL;
 }

 int32_t float64_getexp(float64 f64)
 {
-    Double a = { .i = f64 };
+    int exp = extract64(f64, 52, 11);
     if (float64_is_normal(f64)) {
-        return a.exp;
+        return exp;
     }
     if (float64_is_denormal(f64)) {
-        return a.exp + 1;
+        return exp + 1;
     }
     return -1;
 }
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
 /* Return a maximum finite value with the requested sign */
 static float64 accum_round_float64(Accum a, float_status *fp_status)
 {
+    uint64_t ret;
+
     if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
         && ((a.guard | a.round | a.sticky) == 0)) {
         /* result zero */
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
         }
     }
     /* Underflow? */
-    if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+    ret = int128_getlo(a.mant);
+    if (ret & (1ULL << DF_MANTBITS)) {
         /* Leading one means: No, we're normal. So, we should be done... */
-        Double ret;
-        ret.i = 0;
-        ret.sign = a.sign;
-        ret.exp = a.exp;
-        ret.mant = int128_getlo(a.mant);
-        return ret.i;
+        ret = deposit64(ret, 52, 11, a.exp);
+    } else {
+        assert(a.exp == 1);
+        ret = deposit64(ret, 52, 11, 0);
     }
-    assert(a.exp == 1);
-    Double ret;
-    ret.i = 0;
-    ret.sign = a.sign;
-    ret.exp = 0;
-    ret.mant = int128_getlo(a.mant);
-    return ret.i;
+    ret = deposit64(ret, 63, 1, a.sign);
+    return ret;
 }

 float64 internal_mpyhh(float64 a, float64 b,
--
2.43.0
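For the writeback direction, deposit64(dst, start, length, val) from include/qemu/bitops.h returns dst with the length-bit field at start replaced by val. A small sketch of how the tail of the patch reassembles a float64 (illustrative only; the variable names here are mine, not the patch's):

    uint64_t bits = mant_52;                 /* bits [51:0] already in place */
    bits = deposit64(bits, 52, 11, exp_11);  /* exponent into bits [62:52] */
    bits = deposit64(bits, 63, 1, sign);     /* sign into bit 63 */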
New patch

No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 32 +++-----------------------------
 1 file changed, 3 insertions(+), 29 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
     return -1;
 }

-static uint32_t int128_getw0(Int128 x)
-{
-    return int128_getlo(x);
-}
-
-static uint32_t int128_getw1(Int128 x)
-{
-    return int128_getlo(x) >> 32;
-}
-
 static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
 {
-    Int128 a, b;
-    uint64_t pp0, pp1a, pp1b, pp1s, pp2;
+    uint64_t l, h;

-    a = int128_make64(ai);
-    b = int128_make64(bi);
-    pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
-    pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
-    pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
-    pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
-
-    pp1s = pp1a + pp1b;
-    if ((pp1s < pp1a) || (pp1s < pp1b)) {
-        pp2 += (1ULL << 32);
-    }
-    uint64_t ret_low = pp0 + (pp1s << 32);
-    if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
-        pp2 += 1;
-    }
-
-    return int128_make128(ret_low, pp2 + (pp1s >> 32));
+    mulu64(&l, &h, ai, bi);
+    return int128_make128(l, h);
 }

 static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
--
2.43.0
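mulu64() from include/qemu/host-utils.h computes the full 128-bit product of two uint64_t values, writing the low and high halves through pointers; on 64-bit hosts it compiles down to a single widening multiply rather than the four 32x32 partial products being deleted above. A standalone sketch of the pattern the patch adopts (illustration, not the patch itself):

    #include "qemu/host-utils.h"
    #include "qemu/int128.h"

    /* Illustrative only: full 64x64->128 multiply via the existing helper. */
    static Int128 mul_u64_u64(uint64_t a, uint64_t b)
    {
        uint64_t lo, hi;

        mulu64(&lo, &hi, a, b);          /* hi:lo = a * b, all 128 bits */
        return int128_make128(lo, hi);
    }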
New patch

Initialize x with accumulated via direct assignment,
rather than multiplying by 1.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
         float64_is_infinity(b)) {
         return float64_mul(a, b, fp_status);
     }
-    x.mant = int128_mul_6464(accumulated, 1);
+    x.mant = int128_make64(accumulated);
     x.sticky = sticky;
     prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
     x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
--
2.43.0
Reviewed-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 18 +-----------------
 1 file changed, 1 insertion(+), 17 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,

     /* TODO: vldi patterns when imm 12 is set */

-    /* Fallback to vreplgr2vr */
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
-    switch (vece) {
-    case MO_8:
-        tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
-        break;
-    case MO_16:
-        tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
-        break;
-    case MO_32:
-        tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
-        break;
-    case MO_64:
-        tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
-        break;
-    default:
-        g_assert_not_reached();
-    }
+    tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
 }

 static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,

Convert all targets simultaneously, as the gen_intermediate_code
function disappears from the target. While there are possible
workarounds, they're larger than simply performing the conversion.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h        | 14 --------------
 include/hw/core/tcg-cpu-ops.h    | 13 +++++++++++++
 target/alpha/cpu.h               |  2 ++
 target/arm/internals.h           |  2 ++
 target/avr/cpu.h                 |  2 ++
 target/hexagon/cpu.h             |  2 ++
 target/hppa/cpu.h                |  2 ++
 target/i386/tcg/helper-tcg.h     |  2 ++
 target/loongarch/internals.h     |  2 ++
 target/m68k/cpu.h                |  2 ++
 target/microblaze/cpu.h          |  2 ++
 target/mips/tcg/tcg-internal.h   |  2 ++
 target/openrisc/cpu.h            |  2 ++
 target/ppc/cpu.h                 |  2 ++
 target/riscv/cpu.h               |  3 +++
 target/rx/cpu.h                  |  2 ++
 target/s390x/s390x-internal.h    |  2 ++
 target/sh4/cpu.h                 |  2 ++
 target/sparc/cpu.h               |  2 ++
 target/tricore/cpu.h             |  2 ++
 target/xtensa/cpu.h              |  2 ++
 accel/tcg/cpu-exec.c             |  8 +++++---
 accel/tcg/translate-all.c        |  8 +++++---
 target/alpha/cpu.c               |  1 +
 target/alpha/translate.c         |  4 ++--
 target/arm/cpu.c                 |  1 +
 target/arm/tcg/cpu-v7m.c         |  1 +
 target/arm/tcg/translate.c       |  5 ++---
 target/avr/cpu.c                 |  1 +
 target/avr/translate.c           |  6 +++---
 target/hexagon/cpu.c             |  1 +
 target/hexagon/translate.c       |  4 ++--
 target/hppa/cpu.c                |  1 +
 target/hppa/translate.c          |  4 ++--
 target/i386/tcg/tcg-cpu.c        |  1 +
 target/i386/tcg/translate.c      |  5 ++---
 target/loongarch/cpu.c           |  1 +
 target/loongarch/tcg/translate.c |  4 ++--
 target/m68k/cpu.c                |  1 +
 target/m68k/translate.c          |  4 ++--
 target/microblaze/cpu.c          |  1 +
 target/microblaze/translate.c    |  4 ++--
 target/mips/cpu.c                |  1 +
 target/mips/tcg/translate.c      |  4 ++--
 target/openrisc/cpu.c            |  1 +
 target/openrisc/translate.c      |  4 ++--
 target/ppc/cpu_init.c            |  1 +
 target/ppc/translate.c           |  4 ++--
 target/riscv/tcg/tcg-cpu.c       |  1 +
 target/riscv/translate.c         |  4 ++--
 target/rx/cpu.c                  |  1 +
 target/rx/translate.c            |  4 ++--
 target/s390x/cpu.c               |  1 +
 target/s390x/tcg/translate.c     |  4 ++--
 target/sh4/cpu.c                 |  1 +
 target/sh4/translate.c           |  4 ++--
 target/sparc/cpu.c               |  1 +
 target/sparc/translate.c         |  4 ++--
 target/tricore/cpu.c             |  1 +
 target/tricore/translate.c       |  5 ++---
 target/xtensa/cpu.c              |  1 +
 target/xtensa/translate.c        |  4 ++--
 62 files changed, 121 insertions(+), 62 deletions(-)

diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/bswap.h"
 #include "exec/vaddr.h"

-/**
- * gen_intermediate_code
- * @cpu: cpu context
- * @tb: translation block
- * @max_insns: max number of instructions to translate
- * @pc: guest virtual program counter address
- * @host_pc: host physical program counter address
- *
- * This function must be provided by the target, which should create
- * the target-specific DisasContext, and then invoke translator_loop.
- */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc);
-
 /**
  * DisasJumpType:
  * @DISAS_NEXT: Next instruction in program order.
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
      * Called when the first CPU is realized.
      */
     void (*initialize)(void);
+    /**
+     * @translate_code: Translate guest instructions to TCGOps
+     * @cpu: cpu context
+     * @tb: translation block
+     * @max_insns: max number of instructions to translate
+     * @pc: guest virtual program counter address
+     * @host_pc: host physical program counter address
+     *
+     * This function must be provided by the target, which should create
+     * the target-specific DisasContext, and then invoke translator_loop.
+     */
+    void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
     /**
      * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
      *
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
 };

 void alpha_translate_init(void);
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);

 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
 }

 void avr_cpu_tcg_init(void);
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 int cpu_avr_exec(CPUState *cpu);

diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
 typedef HexagonCPU ArchCPU;

 void hexagon_translate_init(void);
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 #include "exec/cpu-all.h"

diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
 }

 void hppa_translate_init(void);
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU

diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)

 /* translate.c */
 void tcg_x86_init(void);
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 /* excp_helper.c */
 G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)

 void loongarch_translate_init(void);
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+                              int *max_insns, vaddr pc, void *host_pc);

 void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                    uint32_t exception,
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void m68k_tcg_init(void);
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);
 void m68k_cpu_init_gdb(M68kCPU *cpu);
 uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
 void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
 }

 void mb_tcg_init(void);
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU

diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@
 #include "cpu.h"

 void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
 G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc);
 int print_insn_or1k(bfd_vma addr, disassemble_info *info);

 #ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;

 /*****************************************************************************/
 void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

 void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
+
 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                       uint32_t exception, uintptr_t pc);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);
 void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);

 #include "exec/cpu-all.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

 /* translate.c */
 void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
 void s390x_restore_state_to_opc(CPUState *cs,
                                 const TranslationBlock *tb,
                                 const uint64_t *data);
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                                uintptr_t retaddr);

 void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

 /* translate.c */
 void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 /* fop_helper.c */
 target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)

 void cpu_state_reset(CPUTriCoreState *s);
 void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
                                         uint64_t *cs_base, uint32_t *flags)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,

 void xtensa_collect_sr_names(const XtensaConfig *config);
 void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
 void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
 void xtensa_breakpoint_handler(CPUState *cs);
 void xtensa_register_core(XtensaConfigList *node);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)

     if (!tcg_target_initialized) {
         /* Check mandatory TCGCPUOps handlers */
+        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 #ifndef CONFIG_USER_ONLY
-        assert(cpu->cc->tcg_ops->cpu_exec_halt);
-        assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+        assert(tcg_ops->cpu_exec_halt);
+        assert(tcg_ops->cpu_exec_interrupt);
 #endif /* !CONFIG_USER_ONLY */
-        cpu->cc->tcg_ops->initialize();
+        assert(tcg_ops->translate_code);
+        tcg_ops->initialize();
         tcg_target_initialized = true;
     }

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,

     tcg_func_start(tcg_ctx);

-    tcg_ctx->cpu = env_cpu(env);
-    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+    CPUState *cs = env_cpu(env);
+    tcg_ctx->cpu = cs;
+    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
     assert(tb->size != 0);
     tcg_ctx->cpu = NULL;
     *max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     /*
      * Overflow of code_gen_buffer, or the current slice of it.
      *
-     * TODO: We don't need to re-do gen_intermediate_code, nor
+     * TODO: We don't need to re-do tcg_ops->translate_code, nor
      * should we re-do the tcg optimization currently hidden
      * inside tcg_gen_code. All that should be required is to
      * flush the TBs, allocate a new TB, re-initialize it per
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

 static const TCGCPUOps alpha_tcg_ops = {
     .initialize = alpha_translate_init,
+    .translate_code = alpha_translate_code,
     .synchronize_from_tb = alpha_cpu_synchronize_from_tb,
     .restore_state_to_opc = alpha_restore_state_to_opc,

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
     .tb_stop = alpha_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
 #ifdef CONFIG_TCG
 static const TCGCPUOps arm_tcg_ops = {
     .initialize = arm_translate_init,
+    .translate_code = arm_translate_code,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
     .restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)

 static const TCGCPUOps arm_v7m_tcg_ops = {
     .initialize = arm_translate_init,
+    .translate_code = arm_translate_code,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
     .restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
     .tb_stop = arm_tr_tb_stop,
 };

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = { };
     const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {

 static const TCGCPUOps avr_tcg_ops = {
     .initialize = avr_cpu_tcg_init,
+    .translate_code = avr_cpu_translate_code,
     .synchronize_from_tb = avr_cpu_synchronize_from_tb,
     .restore_state_to_opc = avr_restore_state_to_opc,
     .cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
 *
 * - translate()
 * - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
 * - restore_state_to_opc()
 *
 */
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
     .tb_stop = avr_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = { };
     translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)

 static const TCGCPUOps hexagon_tcg_ops = {
     .initialize = hexagon_translate_init,
+    .translate_code = hexagon_translate_code,
     .synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
     .restore_state_to_opc = hexagon_restore_state_to_opc,
 };
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
     .tb_stop = hexagon_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {

 static const TCGCPUOps hppa_tcg_ops = {
     .initialize = hppa_translate_init,
+    .translate_code = hppa_translate_code,
     .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
     .restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
 #endif
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx = { };
     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)

 static const TCGCPUOps x86_tcg_ops = {
     .initialize = tcg_x86_init,
+    .translate_code = x86_translate_code,
     .synchronize_from_tb = x86_cpu_synchronize_from_tb,
     .restore_state_to_opc = x86_restore_state_to_opc,
     .cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
     .tb_stop = i386_tr_tb_stop,
 };

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)

 static const TCGCPUOps loongarch_tcg_ops = {
     .initialize = loongarch_translate_init,
+    .translate_code = loongarch_translate_code,
     .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
     .restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
     .tb_stop = loongarch_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+                              int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

 static const TCGCPUOps m68k_tcg_ops = {
     .initialize = m68k_tcg_init,
+    .translate_code = m68k_translate_code,
     .restore_state_to_opc = m68k_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
     .tb_stop = m68k_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

 static const TCGCPUOps mb_tcg_ops = {
     .initialize = mb_tcg_init,
+    .translate_code = mb_translate_code,
     .synchronize_from_tb = mb_cpu_synchronize_from_tb,
     .restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
     .tb_stop = mb_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
 #include "hw/core/tcg-cpu-ops.h"
 static const TCGCPUOps mips_tcg_ops = {
     .initialize = mips_tcg_init,
+    .translate_code = mips_translate_code,
     .synchronize_from_tb = mips_cpu_synchronize_from_tb,
     .restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
     .tb_stop = mips_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

 static const TCGCPUOps openrisc_tcg_ops = {
     .initialize = openrisc_translate_init,
+    .translate_code = openrisc_translate_code,
     .synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
     .restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
     .tb_stop = openrisc_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc)
733
{
734
DisasContext ctx;
735
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
737
index XXXXXXX..XXXXXXX 100644
738
--- a/target/ppc/cpu_init.c
739
+++ b/target/ppc/cpu_init.c
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
749
index XXXXXXX..XXXXXXX 100644
750
--- a/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
753
.tb_stop = ppc_tr_tb_stop,
754
};
755
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
760
{
761
DisasContext ctx;
762
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
764
index XXXXXXX..XXXXXXX 100644
765
--- a/target/riscv/tcg/tcg-cpu.c
766
+++ b/target/riscv/tcg/tcg-cpu.c
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
776
index XXXXXXX..XXXXXXX 100644
777
--- a/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
780
.tb_stop = riscv_tr_tb_stop,
781
};
782
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
787
{
788
DisasContext ctx;
789
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
791
index XXXXXXX..XXXXXXX 100644
792
--- a/target/rx/cpu.c
793
+++ b/target/rx/cpu.c
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
795
796
static const TCGCPUOps rx_tcg_ops = {
797
.initialize = rx_translate_init,
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
803
index XXXXXXX..XXXXXXX 100644
804
--- a/target/rx/translate.c
805
+++ b/target/rx/translate.c
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
807
.tb_stop = rx_tr_tb_stop,
808
};
809
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
834
.disas_log = s390x_tr_disas_log,
835
};
836
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
841
{
842
DisasContext dc;
843
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/target/sh4/cpu.c
847
+++ b/target/sh4/cpu.c
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
857
index XXXXXXX..XXXXXXX 100644
858
--- a/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
861
.tb_stop = sh4_tr_tb_stop,
862
};
863
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
868
{
869
DisasContext ctx;
870
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
872
index XXXXXXX..XXXXXXX 100644
873
--- a/target/sparc/cpu.c
874
+++ b/target/sparc/cpu.c
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
884
index XXXXXXX..XXXXXXX 100644
885
--- a/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
888
.tb_stop = sparc_tr_tb_stop,
889
};
890
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
895
{
896
DisasContext dc = {};
897
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
899
index XXXXXXX..XXXXXXX 100644
900
--- a/target/tricore/cpu.c
901
+++ b/target/tricore/cpu.c
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
903
904
static const TCGCPUOps tricore_tcg_ops = {
905
.initialize = tricore_tcg_init,
906
+ .translate_code = tricore_translate_code,
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
908
.restore_state_to_opc = tricore_restore_state_to_opc,
909
.tlb_fill = tricore_cpu_tlb_fill,
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
911
index XXXXXXX..XXXXXXX 100644
912
--- a/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
915
.tb_stop = tricore_tr_tb_stop,
916
};
917
918
-
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
920
- vaddr pc, void *host_pc)
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
922
+ int *max_insns, vaddr pc, void *host_pc)
923
{
924
DisasContext ctx;
925
translator_loop(cs, tb, max_insns, pc, host_pc,
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
927
index XXXXXXX..XXXXXXX 100644
928
--- a/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
931
932
static const TCGCPUOps xtensa_tcg_ops = {
933
.initialize = xtensa_translate_init,
934
+ .translate_code = xtensa_translate_code,
935
.debug_excp_handler = xtensa_breakpoint_handler,
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
937
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
939
index XXXXXXX..XXXXXXX 100644
940
--- a/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
943
.tb_stop = xtensa_tr_tb_stop,
944
};
945
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
950
{
951
DisasContext dc = {};
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0