From: Richard Henderson
To: qemu-devel@nongnu.org
Date: Mon, 18 Dec 2017 09:17:45 -0800
Message-Id: <20171218171758.16964-14-richard.henderson@linaro.org>
X-Mailer: git-send-email 2.14.3
In-Reply-To: <20171218171758.16964-1-richard.henderson@linaro.org>
References: <20171218171758.16964-1-richard.henderson@linaro.org>
Subject: [Qemu-devel] [PATCH v7 13/26] target/arm: Use vector infrastructure for aa64 constant shifts
Cc: peter.maydell@linaro.org

Signed-off-by: Richard Henderson
---
 target/arm/translate-a64.c | 386 ++++++++++++++++++++++++++++++++++++++-------
 tcg/tcg-op-gvec.c          |  18 ++-
 tcg/tcg-op-vec.c           |   9 +-
 3 files changed, 351 insertions(+), 62 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 8769b4505a..c47faa5633 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -6432,17 +6432,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
     }
 }
 
-/* Common SHL/SLI - Shift left with an optional insert */
-static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
-                                 bool insert, int shift)
-{
-    if (insert) { /* SLI */
-        tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
-    } else { /* SHL */
-        tcg_gen_shli_i64(tcg_res, tcg_src, shift);
-    }
-}
-
 /* SRI: shift right with insert */
 static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                  int size, int shift)
@@ -6546,7 +6535,11 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert,
     tcg_rn = read_fp_dreg(s, rn);
     tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
 
-    handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
+    if (insert) {
+        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
+    } else {
+        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
+    }
 
     write_fp_dreg(s, rd, tcg_rd);
 
@@ -8283,16 +8276,195 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     }
 }
 
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_sar8i_i64(a, a, shift);
+    tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_sar16i_i64(a, a, shift);
+    tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_sari_i32(a, a, shift);
+    tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_sari_i64(a, a, shift);
+    tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    tcg_gen_sari_vec(vece, a, a, sh);
+    tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_shr8i_i64(a, a, shift);
+    tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_shr16i_i64(a, a, shift);
+    tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    tcg_gen_shri_vec(vece, a, a, sh);
+    tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = (0xff >> shift) * (-1ull / 0xff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = (0xffff >> shift) * (-1ull / 0xffff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
+    tcg_gen_shri_vec(vece, t, a, sh);
+    tcg_gen_and_vec(vece, d, d, m);
+    tcg_gen_or_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(m);
+}
+
 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
 {
+    static const GVecGen2i ssra_op[4] = {
+        { .fni8 = gen_ssra8_i64,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_8 },
+        { .fni8 = gen_ssra16_i64,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_16 },
+        { .fni4 = gen_ssra32_i32,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_32 },
+        { .fni8 = gen_ssra64_i64,
+          .fniv = gen_ssra_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_64 },
+    };
+    static const GVecGen2i usra_op[4] = {
+        { .fni8 = gen_usra8_i64,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_8, },
+        { .fni8 = gen_usra16_i64,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_16, },
+        { .fni4 = gen_usra32_i32,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_32, },
+        { .fni8 = gen_usra64_i64,
+          .fniv = gen_usra_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_64, },
+    };
+    static const GVecGen2i sri_op[4] = {
+        { .fni8 = gen_shr8_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_8 },
+        { .fni8 = gen_shr16_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_16 },
+        { .fni4 = gen_shr32_ins_i32,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_32 },
+        { .fni8 = gen_shr64_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_64 },
+    };
+
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = 2 * (8 << size) - immhb;
     bool accumulate = false;
-    bool round = false;
-    bool insert = false;
     int dsize = is_q ? 128 : 64;
     int esize = 8 << size;
     int elements = dsize/esize;
@@ -8300,6 +8472,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     TCGv_i64 tcg_rn = new_tmp_a64(s);
     TCGv_i64 tcg_rd = new_tmp_a64(s);
     TCGv_i64 tcg_round;
+    uint64_t round_const;
+    const GVecGen2i *gvec_op;
     int i;
 
     if (extract32(immh, 3, 1) && !is_q) {
@@ -8318,64 +8492,141 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
 
     switch (opcode) {
     case 0x02: /* SSRA / USRA (accumulate) */
-        accumulate = true;
-        break;
+        if (is_u) {
+            /* Shift count same as element size produces zero to add. */
+            if (shift == 8 << size) {
+                goto done;
+            }
+            gvec_op = &usra_op[size];
+        } else {
+            /* Shift count same as element size produces all sign to add. */
+            if (shift == 8 << size) {
+                shift -= 1;
+            }
+            gvec_op = &ssra_op[size];
+        }
+        goto do_gvec;
+    case 0x08: /* SRI */
+        /* Shift count same as element size is valid but does nothing. */
+        if (shift == 8 << size) {
+            goto done;
+        }
+        gvec_op = &sri_op[size];
+    do_gvec:
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, rd),
+                        vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                        vec_full_reg_size(s), shift, gvec_op);
+        return;
+
+    case 0x00: /* SSHR / USHR */
+        if (is_u) {
+            if (shift == 8 << size) {
+                /* Shift count the same size as element size produces zero. */
+                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
+                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
+            } else {
+                tcg_gen_gvec_shri(size, vec_full_reg_offset(s, rd),
+                                  vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                                  vec_full_reg_size(s), shift);
+            }
+        } else {
+            /* Shift count the same size as element size produces all sign. */
+            if (shift == 8 << size) {
+                shift -= 1;
+            }
+            tcg_gen_gvec_sari(size, vec_full_reg_offset(s, rd),
+                              vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                              vec_full_reg_size(s), shift);
+        }
+        return;
+
     case 0x04: /* SRSHR / URSHR (rounding) */
-        round = true;
         break;
     case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        accumulate = round = true;
-        break;
-    case 0x08: /* SRI */
-        insert = true;
+        accumulate = true;
         break;
+    default:
+        g_assert_not_reached();
     }
 
-    if (round) {
-        uint64_t round_const = 1ULL << (shift - 1);
-        tcg_round = tcg_const_i64(round_const);
-    } else {
-        tcg_round = NULL;
-    }
+    round_const = 1ULL << (shift - 1);
+    tcg_round = tcg_const_i64(round_const);
 
     for (i = 0; i < elements; i++) {
         read_vec_element(s, tcg_rn, rn, i, memop);
-        if (accumulate || insert) {
+        if (accumulate) {
             read_vec_element(s, tcg_rd, rd, i, memop);
         }
 
-        if (insert) {
-            handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
-        } else {
-            handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
-                                    accumulate, is_u, size, shift);
-        }
+        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+                                accumulate, is_u, size, shift);
 
         write_vec_element(s, tcg_rd, rd, i, size);
     }
+    tcg_temp_free_i64(tcg_round);
 
+ done:
     if (!is_q) {
         clear_vec_high(s, rd);
     }
+}
 
-    if (round) {
-        tcg_temp_free_i64(tcg_round);
-    }
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = ((0xff << shift) & 0xff) * (-1ull / 0xff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = ((0xffff << shift) & 0xffff) * (-1ull / 0xffff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    uint64_t mask = (1ull << sh) - 1;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_dupi_vec(vece, m, mask);
+    tcg_gen_shli_vec(vece, t, a, sh);
+    tcg_gen_and_vec(vece, d, d, m);
+    tcg_gen_or_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(m);
 }
 
 /* SHL/SLI - Vector shift left */
 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
-                                int immh, int immb, int opcode, int rn, int rd)
+                                 int immh, int immb, int opcode, int rn, int rd)
 {
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = immhb - (8 << size);
-    int dsize = is_q ? 128 : 64;
-    int esize = 8 << size;
-    int elements = dsize/esize;
-    TCGv_i64 tcg_rn = new_tmp_a64(s);
-    TCGv_i64 tcg_rd = new_tmp_a64(s);
-    int i;
 
     if (extract32(immh, 3, 1) && !is_q) {
         unallocated_encoding(s);
@@ -8391,19 +8642,40 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
         return;
     }
 
-    for (i = 0; i < elements; i++) {
-        read_vec_element(s, tcg_rn, rn, i, size);
-        if (insert) {
-            read_vec_element(s, tcg_rd, rd, i, size);
-        }
-
-        handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
-
-        write_vec_element(s, tcg_rd, rd, i, size);
-    }
-
-    if (!is_q) {
-        clear_vec_high(s, rd);
+    if (insert) {
+        static const GVecGen2i shi_op[4] = {
+            { .fni8 = gen_shl8_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_8 },
+            { .fni8 = gen_shl16_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_16 },
+            { .fni4 = gen_shl32_ins_i32,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_32 },
+            { .fni8 = gen_shl64_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_64 },
+        };
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, rd),
+                        vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                        vec_full_reg_size(s), shift, &shi_op[size]);
+    } else {
+        tcg_gen_gvec_shli(size, vec_full_reg_offset(s, rd),
+                          vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                          vec_full_reg_size(s), shift);
     }
 }
 
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index efdb009d2a..b29a30a78b 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1290,7 +1290,11 @@ void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, unsigned c)
@@ -1335,7 +1339,11 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, unsigned c)
@@ -1394,7 +1402,11 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 static void do_zip(unsigned vece, uint32_t dofs, uint32_t aofs,
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index a441193b8e..502c5ba891 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -389,11 +389,16 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
     TCGArg ri = temp_arg(rt);
     TCGArg ai = temp_arg(at);
     TCGType type = rt->base_type;
-    unsigned vecl = type - TCG_TYPE_V64;
     int can;
 
     tcg_debug_assert(at->base_type == type);
     tcg_debug_assert(i < (8 << vece));
+
+    if (i == 0) {
+        tcg_gen_mov_vec(r, a);
+        return;
+    }
+
     can = tcg_can_emit_vec_op(opc, type, vece);
     if (can > 0) {
         vec_gen_3(opc, type, vece, ri, ai, i);
@@ -402,7 +407,7 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
            to the target.  Often, but not always, dupi can feed a vector
            shift easier than a scalar.  */
         tcg_debug_assert(can < 0);
-        tcg_expand_vec_op(opc, vecl, vece, ri, ai, i);
+        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
     }
 }
 
-- 
2.14.3
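[Not part of the patch above; a minimal standalone C sketch of the byte-lane mask trick used by gen_shr8_ins_i64. The helper name sri8_scalar and the sample operand values are made up for illustration. Multiplying the per-byte mask by -1ull / 0xff (0x0101010101010101) replicates it into every byte, so eight SRI byte lanes are handled with one 64-bit shift and a pair of ANDs.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical host-side model of the 8-bit-lane SRI expansion. */
static uint64_t sri8_scalar(uint64_t d, uint64_t a, unsigned shift)
{
    /* Per-byte mask of the bits the shifted source may occupy,
       replicated across all eight byte lanes. */
    uint64_t mask = (0xffull >> shift) * (-1ull / 0xff);
    uint64_t t = (a >> shift) & mask;   /* shift every lane, drop cross-lane bits */
    return (d & ~mask) | t;             /* keep the top 'shift' bits of each lane */
}

int main(void)
{
    uint64_t d = 0x1122334455667788ull;
    uint64_t a = 0xffeeddccbbaa9988ull;
    /* Each result byte keeps its top 3 bits from d and takes the
       remaining 5 bits from the corresponding byte of a >> 3. */
    printf("%016llx\n", (unsigned long long)sri8_scalar(d, a, 3));
    return 0;
}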