Convert handle_vec_simd_shli to decodetree. This includes SHL and SLI.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
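Note for review, not part of the commit message: a minimal sketch, assuming a
GNU C compiler, of why the imm field extracted directly by the new @q_shli_*
formats equals the shift amount the removed handle_vec_simd_shli computed
from immh:immb. old_shl_amount is a hypothetical helper used only for
illustration.

    #include <assert.h>

    /* Shift amount as the removed handle_vec_simd_shli computed it. */
    static int old_shl_amount(int immh, int immb)
    {
        int size = 32 - __builtin_clz((unsigned)immh) - 1;  /* 32 - clz32(immh) - 1 */
        int immhb = immh << 3 | immb;
        return immhb - (8 << size);
    }

    int main(void)
    {
        /* SHL .8H, #5: immh=0010, immb=101.  The @q_shli_h format extracts
         * imm:4 = immh<0>:immb = 0101 = 5, the same value as below. */
        assert(old_shl_amount(0x2, 0x5) == 5);
        return 0;
    }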
target/arm/tcg/translate-a64.c | 33 +++------------------------------
target/arm/tcg/a64.decode | 15 +++++++++++++++
2 files changed, 18 insertions(+), 30 deletions(-)
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 5c76cdf101..1225aac665 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -6980,6 +6980,8 @@ TRANS(URSHR_v, do_vec_shift_imm, a, gen_gvec_urshr)
TRANS(SRSRA_v, do_vec_shift_imm, a, gen_gvec_srsra)
TRANS(URSRA_v, do_vec_shift_imm, a, gen_gvec_ursra)
TRANS(SRI_v, do_vec_shift_imm, a, gen_gvec_sri)
+TRANS(SHL_v, do_vec_shift_imm, a, tcg_gen_gvec_shli)
+TRANS(SLI_v, do_vec_shift_imm, a, gen_gvec_sli)
 
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
@@ -10445,33 +10447,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
 
-/* SHL/SLI - Vector shift left */
-static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
- int immh, int immb, int opcode, int rn, int rd)
-{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
-
- /* Range of size is limited by decode: immh is a non-zero 4 bit field */
- assert(size >= 0 && size <= 3);
-
- if (extract32(immh, 3, 1) && !is_q) {
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- if (insert) {
- gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
- } else {
- gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
- }
-}
-
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
int immh, int immb, int opcode, int rn, int rd)
@@ -10585,9 +10560,6 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
}
 
switch (opcode) {
- case 0x0a: /* SHL / SLI */
- handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
- break;
case 0x10: /* SHRN */
case 0x11: /* RSHRN / SQRSHRUN */
if (is_u) {
@@ -10628,6 +10600,7 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
case 0x04: /* SRSHR / URSHR (rounding) */
case 0x06: /* SRSRA / URSRA (accum + rounding) */
case 0x08: /* SRI */
+ case 0x0a: /* SHL / SLI */
unallocated_encoding(s);
return;
}
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 74ba1fa07c..77b860a3f2 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -1205,6 +1205,11 @@ FMOVI_s 0001 1110 .. 1 imm:8 100 00000 rd:5 esz=%esz_hsd
@q_shri_d . 1 .. ..... 1 ...... ..... . rn:5 rd:5 \
&qrri_e esz=3 imm=%neon_rshift_i6 q=1
 
+@q_shli_b . q:1 .. ..... 0001 imm:3 ..... . rn:5 rd:5 &qrri_e esz=0
+@q_shli_h . q:1 .. ..... 001 imm:4 ..... . rn:5 rd:5 &qrri_e esz=1
+@q_shli_s . q:1 .. ..... 01 imm:5 ..... . rn:5 rd:5 &qrri_e esz=2
+@q_shli_d . 1 .. ..... 1 imm:6 ..... . rn:5 rd:5 &qrri_e esz=3 q=1
+
FMOVI_v_h 0 q:1 00 1111 00000 ... 1111 11 ..... rd:5 %abcdefgh
 
# MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode.
@@ -1254,3 +1259,13 @@ SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_b
SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_h
SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_s
SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_d
+
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_b
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_h
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_s
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_d
+
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_b
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_h
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_s
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_d
--
2.43.0