Convert the scalar shift-left-immediate insns, SHL and SLI, to decodetree.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
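Reviewer note, not part of the commit message: a minimal reference sketch
of the scalar SLI semantics that gen_sli_d expresses below with
tcg_gen_deposit_i64. The helper name sli64_ref is made up for illustration
and is not code from this patch; for shift in 0..63 the low 'shift' bits of
the destination are preserved and the remaining bits come from the shifted
source, while SHL maps directly onto tcg_gen_shli_i64.

    #include <stdint.h>

    /* Reference model of scalar SLI, equivalent to
     * tcg_gen_deposit_i64(rd, rd, rn, shift, 64 - shift)
     * for shift in 0..63. */
    static uint64_t sli64_ref(uint64_t rd, uint64_t rn, unsigned shift)
    {
        uint64_t keep = (1ull << shift) - 1;   /* low bits kept from rd      */
        return (rd & keep) | (rn << shift);    /* high bits from rn << shift */
    }

Design note: unlike @shri_d, the new @shli_d format takes imm:6 directly,
because with immh<3> fixed to 1 the left-shift amount is immh:immb - 64,
i.e. the low six bits of the field, so no rsub transform is needed.
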
target/arm/tcg/translate-a64.c | 44 +++++++---------------------------
target/arm/tcg/a64.decode | 4 ++++
2 files changed, 13 insertions(+), 35 deletions(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index efd93a7f23..934746d2f2 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -7124,6 +7124,11 @@ static void gen_sri_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
}
}
 
+static void gen_sli_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ tcg_gen_deposit_i64(dst, dst, src, shift, 64 - shift);
+}
+
static bool do_vec_shift_imm_narrow(DisasContext *s, arg_qrri_e *a,
WideShiftImmFn * const fns[3], MemOp sign)
{
@@ -7201,6 +7206,9 @@ TRANS(SRSRA_s, do_scalar_shift_imm, a, gen_srsra_d, true, 0)
TRANS(URSRA_s, do_scalar_shift_imm, a, gen_ursra_d, true, 0)
TRANS(SRI_s, do_scalar_shift_imm, a, gen_sri_d, true, 0)
 
+TRANS(SHL_s, do_scalar_shift_imm, a, tcg_gen_shli_i64, false, 0)
+TRANS(SLI_s, do_scalar_shift_imm, a, gen_sli_d, true, 0)
+
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
@@ -9417,38 +9425,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
}
}
 
-/* SHL/SLI - Scalar shift left */
-static void handle_scalar_simd_shli(DisasContext *s, bool insert,
- int immh, int immb, int opcode,
- int rn, int rd)
-{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
- TCGv_i64 tcg_rn;
- TCGv_i64 tcg_rd;
-
- if (!extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- tcg_rn = read_fp_dreg(s, rn);
- tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
-
- if (insert) {
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
- } else {
- tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
- }
-
- write_fp_dreg(s, rd, tcg_rd);
-}
-
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
* (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
@@ -9900,9 +9876,6 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
}
 
switch (opcode) {
- case 0x0a: /* SHL / SLI */
- handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
- break;
case 0x1c: /* SCVTF, UCVTF */
handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
opcode, rn, rd);
@@ -9940,6 +9913,7 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
case 0x04: /* SRSHR / URSHR */
case 0x06: /* SRSRA / URSRA */
case 0x08: /* SRI */
+ case 0x0a: /* SHL / SLI */
unallocated_encoding(s);
break;
}
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 6c2362b3bb..96803fe6e4 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -1291,6 +1291,7 @@ RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_s
 
@shri_d .... ..... 1 ...... ..... . rn:5 rd:5 \
&rri_e esz=3 imm=%neon_rshift_i6
+@shli_d .... ..... 1 imm:6 ..... . rn:5 rd:5 &rri_e esz=3
 
SSHR_s 0101 11110 .... ... 00000 1 ..... ..... @shri_d
USHR_s 0111 11110 .... ... 00000 1 ..... ..... @shri_d
@@ -1301,3 +1302,6 @@ URSHR_s 0111 11110 .... ... 00100 1 ..... ..... @shri_d
SRSRA_s 0101 11110 .... ... 00110 1 ..... ..... @shri_d
URSRA_s 0111 11110 .... ... 00110 1 ..... ..... @shri_d
SRI_s 0111 11110 .... ... 01000 1 ..... ..... @shri_d
+
+SHL_s 0101 11110 .... ... 01010 1 ..... ..... @shli_d
+SLI_s 0111 11110 .... ... 01010 1 ..... ..... @shli_d
--
2.43.0