From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Date: Fri, 19 Oct 2018 17:57:25 +0100
Message-Id: <20181019165735.22511-36-peter.maydell@linaro.org>
X-Mailer: git-send-email 2.19.1
In-Reply-To: <20181019165735.22511-1-peter.maydell@linaro.org>
References: <20181019165735.22511-1-peter.maydell@linaro.org>
Subject: [Qemu-devel] [PULL 35/45] target/arm: Use gvec for VSRI, VSLI

From: Richard Henderson <richard.henderson@linaro.org>

Move the sri_op and shi_op expanders from translate-a64.c into translate.c,
renaming shi_op to sli_op, so the 32-bit Neon decoder can also use them to
implement VSRI and VSLI with gvec.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-15-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h     |   2 +
 target/arm/translate-a64.c | 152 +----------------------
 target/arm/translate.c     | 244 ++++++++++++++++++++++++++-----------
 3 files changed, 179 insertions(+), 219 deletions(-)
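
For reference, the per-element semantics that the sri_op/sli_op expanders
implement can be modelled in plain C. This is a minimal sketch for the
8-bit lane case only, not QEMU code; sri8/sli8 are invented names, and
shift is assumed already reduced into range as the decoders in this patch
guarantee:

    #include <stdint.h>

    /* VSRI: insert the right-shifted bits of a into the low part of d,
     * preserving the top 'shift' bits of d (shift in 1..8).
     */
    static inline uint8_t sri8(uint8_t d, uint8_t a, int shift)
    {
        uint8_t mask = 0xff >> shift;        /* bits the insert writes */
        return (d & ~mask) | ((uint8_t)(a >> shift) & mask);
    }

    /* VSLI: insert the left-shifted bits of a into the high part of d,
     * preserving the low 'shift' bits of d (shift in 0..7).
     */
    static inline uint8_t sli8(uint8_t d, uint8_t a, int shift)
    {
        uint8_t mask = (uint8_t)(0xff << shift);
        return (d & ~mask) | ((uint8_t)(a << shift) & mask);
    }

This is the same d = (d & ~mask) | (shifted_a & mask) pattern that the
gen_shr*_ins_* and gen_shl*_ins_* helpers below emit as TCG ops, with the
mask replicated across every lane by dup_const().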

diff --git a/target/arm/translate.h b/target/arm/translate.h
index 5e13571b362..7eb759d0414 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -198,6 +198,8 @@ extern const GVecGen3 bit_op;
 extern const GVecGen3 bif_op;
 extern const GVecGen2i ssra_op[4];
 extern const GVecGen2i usra_op[4];
+extern const GVecGen2i sri_op[4];
+extern const GVecGen2i sli_op[4];
 
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 6d11e384898..c26168d72c8 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -9392,85 +9392,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     }
 }
 
-static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_8, 0xff >> shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shri_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shri_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
-    tcg_gen_shri_i32(a, a, shift);
-    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
-}
-
-static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    tcg_gen_shri_i64(a, a, shift);
-    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
-}
-
-static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
-    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
-    TCGv_vec t = tcg_temp_new_vec_matching(d);
-    TCGv_vec m = tcg_temp_new_vec_matching(d);
-
-    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
-    tcg_gen_shri_vec(vece, t, a, sh);
-    tcg_gen_and_vec(vece, d, d, m);
-    tcg_gen_or_vec(vece, d, d, t);
-
-    tcg_temp_free_vec(t);
-    tcg_temp_free_vec(m);
-}
-
 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
 {
-    static const GVecGen2i sri_op[4] = {
-        { .fni8 = gen_shr8_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_8 },
-        { .fni8 = gen_shr16_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_16 },
-        { .fni4 = gen_shr32_ins_i32,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_32 },
-        { .fni8 = gen_shr64_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_64 },
-    };
-
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = 2 * (8 << size) - immhb;
@@ -9566,85 +9491,10 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     clear_vec_high(s, is_q, rd);
 }
 
-static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_8, 0xff << shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shli_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_16, 0xffff << shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shli_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
-    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
-}
-
-static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
-}
-
-static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
-    uint64_t mask = (1ull << sh) - 1;
-    TCGv_vec t = tcg_temp_new_vec_matching(d);
-    TCGv_vec m = tcg_temp_new_vec_matching(d);
-
-    tcg_gen_dupi_vec(vece, m, mask);
-    tcg_gen_shli_vec(vece, t, a, sh);
-    tcg_gen_and_vec(vece, d, d, m);
-    tcg_gen_or_vec(vece, d, d, t);
-
-    tcg_temp_free_vec(t);
-    tcg_temp_free_vec(m);
-}
-
 /* SHL/SLI - Vector shift left */
 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
 {
-    static const GVecGen2i shi_op[4] = {
-        { .fni8 = gen_shl8_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_8 },
-        { .fni8 = gen_shl16_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_16 },
-        { .fni4 = gen_shl32_ins_i32,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_32 },
-        { .fni8 = gen_shl64_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_64 },
-    };
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = immhb - (8 << size);
@@ -9664,7 +9514,7 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
     }
 
     if (insert) {
-        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
+        gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
     } else {
         gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
     }
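
A note on the GVecGen2i tables being moved: each entry offers the gvec
expander several implementations of the same lane operation. .fniv is used
when the host TCG backend supports the vector opcode named by .opc;
.fni8/.fni4 expand the operation on 64-bit/32-bit integer chunks otherwise,
with .prefer_i64 biasing the choice toward 64-bit chunks on 64-bit hosts;
.load_dest is set because an insert must read the old destination. A rough
model of that selection (a sketch with invented helper names, not the real
tcg_gen_gvec_2i, which also handles operand sizes and high-part clearing):

    /* Sketch only: host_supports() and expand_with_*() are invented. */
    static void expand_2i_sketch(const GVecGen2i *g, uint32_t dofs,
                                 uint32_t aofs, uint32_t oprsz, int64_t c)
    {
        if (g->fniv && host_supports(g->opc, g->vece)) {
            expand_with_host_vectors(dofs, aofs, oprsz, c, g->fniv);
        } else if (g->fni8 && (g->prefer_i64 || !g->fni4)) {
            expand_with_i64_chunks(dofs, aofs, oprsz, c, g->fni8);
        } else {
            expand_with_i32_chunks(dofs, aofs, oprsz, c, g->fni4);
        }
    }
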
diff --git a/target/arm/translate.c b/target/arm/translate.c
index b3b2ef93f4d..ee7294e54f0 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5878,6 +5878,160 @@ const GVecGen2i usra_op[4] = {
       .vece = MO_64, },
 };
 
+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_8, 0xff >> shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    if (sh == 0) {
+        tcg_gen_mov_vec(d, a);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(d);
+        TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+        tcg_gen_shri_vec(vece, t, a, sh);
+        tcg_gen_and_vec(vece, d, d, m);
+        tcg_gen_or_vec(vece, d, d, t);
+
+        tcg_temp_free_vec(t);
+        tcg_temp_free_vec(m);
+    }
+}
+
+const GVecGen2i sri_op[4] = {
+    { .fni8 = gen_shr8_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_8 },
+    { .fni8 = gen_shr16_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_16 },
+    { .fni4 = gen_shr32_ins_i32,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_32 },
+    { .fni8 = gen_shr64_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_64 },
+};
+
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_8, 0xff << shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_16, 0xffff << shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    if (sh == 0) {
+        tcg_gen_mov_vec(d, a);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(d);
+        TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
+        tcg_gen_shli_vec(vece, t, a, sh);
+        tcg_gen_and_vec(vece, d, d, m);
+        tcg_gen_or_vec(vece, d, d, t);
+
+        tcg_temp_free_vec(t);
+        tcg_temp_free_vec(m);
+    }
+}
+
+const GVecGen2i sli_op[4] = {
+    { .fni8 = gen_shl8_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_8 },
+    { .fni8 = gen_shl16_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_16 },
+    { .fni4 = gen_shl32_ins_i32,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_32 },
+    { .fni8 = gen_shl64_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_64 },
+};
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
    We process data in a mixture of 32-bit and 64-bit chunks.
@@ -5895,7 +6049,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     int pairwise;
     int u;
     int vec_size;
-    uint32_t imm, mask;
+    uint32_t imm;
     TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
     TCGv_ptr ptr1, ptr2, ptr3;
     TCGv_i64 tmp64;
@@ -6534,8 +6688,27 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     }
                     return 0;
 
+                case 4: /* VSRI */
+                    if (!u) {
+                        return 1;
+                    }
+                    /* Right shift comes here negative.  */
+                    shift = -shift;
+                    /* Shift out of range leaves destination unchanged.  */
+                    if (shift < 8 << size) {
+                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+                                        shift, &sri_op[size]);
+                    }
+                    return 0;
+
                 case 5: /* VSHL, VSLI */
-                    if (!u) { /* VSHL */
+                    if (u) { /* VSLI */
+                        /* Shift out of range leaves destination unchanged.  */
+                        if (shift < 8 << size) {
+                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
+                                            vec_size, shift, &sli_op[size]);
+                        }
+                    } else { /* VSHL */
                         /* Shifts larger than the element size are
                          * architecturally valid and results in zero.
                          */
@@ -6545,9 +6718,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                             tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                               vec_size, vec_size);
                         }
-                        return 0;
                     }
-                    break;
+                    return 0;
                 }
=20
                 if (size == 3) {
@@ -6573,10 +6745,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                             else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                             break;
-                        case 4: /* VSRI */
-                        case 5: /* VSHL, VSLI */
-                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
-                            break;
                         case 6: /* VQSHLU */
                             gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                       cpu_V0, cpu_V1);
@@ -6597,21 +6765,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                             /* Accumulate.  */
                             neon_load_reg64(cpu_V1, rd + pass);
                             tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
-                        } else if (op == 4 || (op == 5 && u)) {
-                            /* Insert */
-                            neon_load_reg64(cpu_V1, rd + pass);
-                            uint64_t mask;
-                            if (shift < -63 || shift > 63) {
-                                mask = 0;
-                            } else {
-                                if (op == 4) {
-                                    mask = 0xffffffffffffffffull >> -shift;
-                                } else {
-                                    mask = 0xffffffffffffffffull << shift;
-                                }
-                            }
-                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
-                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                         }
                         neon_store_reg64(cpu_V0, rd + pass);
                     } else { /* size < 3 */
@@ -6624,15 +6777,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                         case 3: /* VRSRA */
                             GEN_NEON_INTEGER_OP(rshl);
                             break;
-                        case 4: /* VSRI */
-                        case 5: /* VSHL, VSLI */
-                            switch (size) {
-                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
-                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
-                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
-                            default: abort();
-                            }
-                            break;
                         case 6: /* VQSHLU */
                             switch (size) {
                             case 0:
@@ -6664,42 +6808,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                             tmp2 = neon_load_reg(rd, pass);
                             gen_neon_add(size, tmp, tmp2);
                             tcg_temp_free_i32(tmp2);
-                        } else if (op == 4 || (op == 5 && u)) {
-                            /* Insert */
-                            switch (size) {
-                            case 0:
-                                if (op == 4)
-                                    mask = 0xff >> -shift;
-                                else
-                                    mask = (uint8_t)(0xff << shift);
-                                mask |= mask << 8;
-                                mask |= mask << 16;
-                                break;
-                            case 1:
-                                if (op == 4)
-                                    mask = 0xffff >> -shift;
-                                else
-                                    mask = (uint16_t)(0xffff << shift);
-                                mask |= mask << 16;
-                                break;
-                            case 2:
-                                if (shift < -31 || shift > 31) {
-                                    mask = 0;
-                                } else {
-                                    if (op == 4)
-                                        mask = 0xffffffffu >> -shift;
-                                    else
-                                        mask = 0xffffffffu << shift;
-                                }
-                                break;
-                            default:
-                                abort();
-                            }
-                            tmp2 = neon_load_reg(rd, pass);
-                            tcg_gen_andi_i32(tmp, tmp, mask);
-                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
-                            tcg_gen_or_i32(tmp, tmp, tmp2);
-                            tcg_temp_free_i32(tmp2);
                         }
                         neon_store_reg(rd, pass, tmp);
                     }
-- 
2.19.1