From: Song Gao <gaosong@loongson.cn>
To: qemu-devel@nongnu.org
Cc: richard.henderson@linaro.org, maobibo@loongson.cn
Subject: [PATCH RESEND v5 20/57] target/loongarch: Implement xvaddw/xvsubw
Date: Thu, 7 Sep 2023 16:31:21 +0800
Message-Id: <20230907083158.3975132-21-gaosong@loongson.cn>
X-Mailer: git-send-email 2.39.1
In-Reply-To: <20230907083158.3975132-1-gaosong@loongson.cn>
References: <20230907083158.3975132-1-gaosong@loongson.cn>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

This patch includes:
- XVADDW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVSUBW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVADDW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D}.
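For readers unfamiliar with the even/odd widening forms, here is a minimal,
self-contained C sketch (illustrative only, not part of the patch; the helper
name and plain-array representation are hypothetical) of what XVADDWEV.H.B
computes on a 256-bit LASX register: each 16-bit destination element is the
sum of the sign-extended even-indexed byte elements of the two sources. The
odd-element (OD), subtract (XVSUBW) and unsigned/mixed-sign variants follow
the same pattern, which is why the helpers below iterate over
simd_oprsz(desc) bytes instead of a fixed 128-bit LSX_LEN.

/* Illustrative model of XVADDWEV.H.B on a 256-bit register: each 16-bit
 * destination element is the sum of the sign-extended even-numbered 8-bit
 * elements of the two sources. Hypothetical names, not QEMU code. */
#include <stdint.h>
#include <stdio.h>

#define LASX_BYTES 32

static void xvaddwev_h_b_model(int16_t d[LASX_BYTES / 2],
                               const int8_t j[LASX_BYTES],
                               const int8_t k[LASX_BYTES])
{
    for (int i = 0; i < LASX_BYTES / 2; i++) {
        /* even-indexed byte elements, widened to 16 bits before adding */
        d[i] = (int16_t)j[2 * i] + (int16_t)k[2 * i];
    }
}

int main(void)
{
    int8_t j[LASX_BYTES], k[LASX_BYTES];
    int16_t d[LASX_BYTES / 2];

    for (int i = 0; i < LASX_BYTES; i++) {
        j[i] = (int8_t)(i * 9 - 100);    /* arbitrary test pattern */
        k[i] = (int8_t)(127 - i * 7);
    }
    xvaddwev_h_b_model(d, j, k);

    /* e.g. d[0] = j[0] + k[0] = -100 + 127 = 27, with no 8-bit wrap-around */
    printf("d[0] = %d\n", d[0]);
    return 0;
}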
Signed-off-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/loongarch/insns.decode               |  45 ++++++++
 target/loongarch/disas.c                    |  43 +++++++
 target/loongarch/vec_helper.c               | 120 ++++++++++++++------
 target/loongarch/insn_trans/trans_vec.c.inc |  41 +++++++
 4 files changed, 215 insertions(+), 34 deletions(-)

diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index ba0b36f4a7..e1d8b30179 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1361,6 +1361,51 @@ xvhsubw_wu_hu    0111 01000101 10101 ..... ..... .....   @vvv
 xvhsubw_du_wu    0111 01000101 10110 ..... ..... .....   @vvv
 xvhsubw_qu_du    0111 01000101 10111 ..... ..... .....   @vvv
 
+xvaddwev_h_b     0111 01000001 11100 ..... ..... .....   @vvv
+xvaddwev_w_h     0111 01000001 11101 ..... ..... .....   @vvv
+xvaddwev_d_w     0111 01000001 11110 ..... ..... .....   @vvv
+xvaddwev_q_d     0111 01000001 11111 ..... ..... .....   @vvv
+xvaddwod_h_b     0111 01000010 00100 ..... ..... .....   @vvv
+xvaddwod_w_h     0111 01000010 00101 ..... ..... .....   @vvv
+xvaddwod_d_w     0111 01000010 00110 ..... ..... .....   @vvv
+xvaddwod_q_d     0111 01000010 00111 ..... ..... .....   @vvv
+
+xvsubwev_h_b     0111 01000010 00000 ..... ..... .....   @vvv
+xvsubwev_w_h     0111 01000010 00001 ..... ..... .....   @vvv
+xvsubwev_d_w     0111 01000010 00010 ..... ..... .....   @vvv
+xvsubwev_q_d     0111 01000010 00011 ..... ..... .....   @vvv
+xvsubwod_h_b     0111 01000010 01000 ..... ..... .....   @vvv
+xvsubwod_w_h     0111 01000010 01001 ..... ..... .....   @vvv
+xvsubwod_d_w     0111 01000010 01010 ..... ..... .....   @vvv
+xvsubwod_q_d     0111 01000010 01011 ..... ..... .....   @vvv
+
+xvaddwev_h_bu    0111 01000010 11100 ..... ..... .....   @vvv
+xvaddwev_w_hu    0111 01000010 11101 ..... ..... .....   @vvv
+xvaddwev_d_wu    0111 01000010 11110 ..... ..... .....   @vvv
+xvaddwev_q_du    0111 01000010 11111 ..... ..... .....   @vvv
+xvaddwod_h_bu    0111 01000011 00100 ..... ..... .....   @vvv
+xvaddwod_w_hu    0111 01000011 00101 ..... ..... .....   @vvv
+xvaddwod_d_wu    0111 01000011 00110 ..... ..... .....   @vvv
+xvaddwod_q_du    0111 01000011 00111 ..... ..... .....   @vvv
+
+xvsubwev_h_bu    0111 01000011 00000 ..... ..... .....   @vvv
+xvsubwev_w_hu    0111 01000011 00001 ..... ..... .....   @vvv
+xvsubwev_d_wu    0111 01000011 00010 ..... ..... .....   @vvv
+xvsubwev_q_du    0111 01000011 00011 ..... ..... .....   @vvv
+xvsubwod_h_bu    0111 01000011 01000 ..... ..... .....   @vvv
+xvsubwod_w_hu    0111 01000011 01001 ..... ..... .....   @vvv
+xvsubwod_d_wu    0111 01000011 01010 ..... ..... .....   @vvv
+xvsubwod_q_du    0111 01000011 01011 ..... ..... .....   @vvv
+
+xvaddwev_h_bu_b  0111 01000011 11100 ..... ..... .....   @vvv
+xvaddwev_w_hu_h  0111 01000011 11101 ..... ..... .....   @vvv
+xvaddwev_d_wu_w  0111 01000011 11110 ..... ..... .....   @vvv
+xvaddwev_q_du_d  0111 01000011 11111 ..... ..... .....   @vvv
+xvaddwod_h_bu_b  0111 01000100 00000 ..... ..... .....   @vvv
+xvaddwod_w_hu_h  0111 01000100 00001 ..... ..... .....   @vvv
+xvaddwod_d_wu_w  0111 01000100 00010 ..... ..... .....   @vvv
+xvaddwod_q_du_d  0111 01000100 00011 ..... ..... .....   @vvv
+
 xvreplgr2vr_b    0111 01101001 11110 00000 ..... .....   @vr
 xvreplgr2vr_h    0111 01101001 11110 00001 ..... .....   @vr
 xvreplgr2vr_w    0111 01101001 11110 00010 ..... .....   @vr
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index c810a52f0d..e3e57e1d05 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1782,6 +1782,49 @@ INSN_LASX(xvhsubw_wu_hu, vvv)
 INSN_LASX(xvhsubw_du_wu, vvv)
 INSN_LASX(xvhsubw_qu_du, vvv)
 
+INSN_LASX(xvaddwev_h_b, vvv)
+INSN_LASX(xvaddwev_w_h, vvv)
+INSN_LASX(xvaddwev_d_w, vvv)
+INSN_LASX(xvaddwev_q_d, vvv)
+INSN_LASX(xvaddwod_h_b, vvv)
+INSN_LASX(xvaddwod_w_h, vvv)
+INSN_LASX(xvaddwod_d_w, vvv)
+INSN_LASX(xvaddwod_q_d, vvv)
+INSN_LASX(xvsubwev_h_b, vvv)
+INSN_LASX(xvsubwev_w_h, vvv)
+INSN_LASX(xvsubwev_d_w, vvv)
+INSN_LASX(xvsubwev_q_d, vvv)
+INSN_LASX(xvsubwod_h_b, vvv)
+INSN_LASX(xvsubwod_w_h, vvv)
+INSN_LASX(xvsubwod_d_w, vvv)
+INSN_LASX(xvsubwod_q_d, vvv)
+
+INSN_LASX(xvaddwev_h_bu, vvv)
+INSN_LASX(xvaddwev_w_hu, vvv)
+INSN_LASX(xvaddwev_d_wu, vvv)
+INSN_LASX(xvaddwev_q_du, vvv)
+INSN_LASX(xvaddwod_h_bu, vvv)
+INSN_LASX(xvaddwod_w_hu, vvv)
+INSN_LASX(xvaddwod_d_wu, vvv)
+INSN_LASX(xvaddwod_q_du, vvv)
+INSN_LASX(xvsubwev_h_bu, vvv)
+INSN_LASX(xvsubwev_w_hu, vvv)
+INSN_LASX(xvsubwev_d_wu, vvv)
+INSN_LASX(xvsubwev_q_du, vvv)
+INSN_LASX(xvsubwod_h_bu, vvv)
+INSN_LASX(xvsubwod_w_hu, vvv)
+INSN_LASX(xvsubwod_d_wu, vvv)
+INSN_LASX(xvsubwod_q_du, vvv)
+
+INSN_LASX(xvaddwev_h_bu_b, vvv)
+INSN_LASX(xvaddwev_w_hu_h, vvv)
+INSN_LASX(xvaddwev_d_wu_w, vvv)
+INSN_LASX(xvaddwev_q_du_d, vvv)
+INSN_LASX(xvaddwod_h_bu_b, vvv)
+INSN_LASX(xvaddwod_w_hu_h, vvv)
+INSN_LASX(xvaddwod_d_wu_w, vvv)
+INSN_LASX(xvaddwod_q_du_d, vvv)
+
 INSN_LASX(xvreplgr2vr_b, vr)
 INSN_LASX(xvreplgr2vr_h, vr)
 INSN_LASX(xvreplgr2vr_w, vr)
diff --git a/target/loongarch/vec_helper.c b/target/loongarch/vec_helper.c
index 2ce0ca41a7..fc3b07e8d2 100644
--- a/target/loongarch/vec_helper.c
+++ b/target/loongarch/vec_helper.c
@@ -106,133 +106,173 @@ void HELPER(vhsubw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
 }
 
 #define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                         \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)       \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
 {                                                                 \
     int i;                                                        \
     VReg *Vd = (VReg *)vd;                                        \
     VReg *Vj = (VReg *)vj;                                        \
     VReg *Vk = (VReg *)vk;                                        \
     typedef __typeof(Vd->E1(0)) TD;                               \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
+    int oprsz = simd_oprsz(desc);                                 \
+                                                                  \
+    for (i = 0; i < oprsz / (BIT / 8); i++) {                     \
         Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i) ,(TD)Vk->E2(2 * i));  \
     }                                                             \
 }
 
 #define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                  \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)               \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)            \
 {                                                                         \
     int i;                                                                \
     VReg *Vd = (VReg *)vd;                                                \
     VReg *Vj = (VReg *)vj;                                                \
     VReg *Vk = (VReg *)vk;                                                \
     typedef __typeof(Vd->E1(0)) TD;                                       \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                                   \
+    int oprsz = simd_oprsz(desc);                                         \
+                                                                          \
+    for (i = 0; i < oprsz / (BIT / 8); i++) {                             \
         Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1));  \
     }                                                                     \
 }
 
-void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i)),
+                              int128_makes64(Vk->D(2 * i)));
+    }
 }
 
 DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
 DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
 DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
 
-void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i +1)),
+                              int128_makes64(Vk->D(2 * i +1)));
+    }
 }
 
 DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
 DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
 DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
 
-void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i)),
+                              int128_makes64(Vk->D(2 * i)));
+    }
 }
 
 DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
 DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
 DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
 
-void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
+                              int128_makes64(Vk->D(2 * i + 1)));
+    }
 }
 
 DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
 DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
 DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
 
-void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
-                          int128_make64((uint64_t)Vk->D(0)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
+                              int128_make64(Vk->UD(2 * i)));
+    }
 }
 
 DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
 DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
 DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
 
-void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
-                          int128_make64((uint64_t)Vk->D(1)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
+                              int128_make64(Vk->UD(2 * i + 1)));
+    }
 }
 
 DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
 DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
 DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
 
-void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
-                          int128_make64((uint64_t)Vk->D(0)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i)),
+                              int128_make64(Vk->UD(2 * i)));
+    }
 }
 
 DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
 DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
 DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
 
-void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
-                          int128_make64((uint64_t)Vk->D(1)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
+                              int128_make64(Vk->UD(2 * i + 1)));
+    }
 }
 
 DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
@@ -240,7 +280,7 @@ DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
 DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
 
 #define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                 \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)               \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)            \
 {                                                                         \
     int i;                                                                \
     VReg *Vd = (VReg *)vd;                                                \
@@ -248,13 +288,15 @@ void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)               \
     VReg *Vk = (VReg *)vk;                                                \
     typedef __typeof(Vd->ES1(0)) TDS;                                     \
     typedef __typeof(Vd->EU1(0)) TDU;                                     \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                                   \
+    int oprsz = simd_oprsz(desc);                                         \
+                                                                          \
+    for (i = 0; i < oprsz / (BIT / 8); i++) {                             \
         Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i) ,(TDS)Vk->ES2(2 * i));     \
     }                                                                     \
 }
 
 #define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                  \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)               \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)            \
 {                                                                         \
     int i;                                                                \
     VReg *Vd = (VReg *)vd;                                                \
@@ -262,33 +304,43 @@ void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)               \
     VReg *Vk = (VReg *)vk;                                                \
     typedef __typeof(Vd->ES1(0)) TDS;                                     \
     typedef __typeof(Vd->EU1(0)) TDU;                                     \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                                   \
+    int oprsz = simd_oprsz(desc);                                         \
+                                                                          \
+    for (i = 0; i < oprsz / (BIT / 8); i++) {                             \
         Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
     }                                                                     \
 }
 
-void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
-                          int128_makes64(Vk->D(0)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
+                              int128_makes64(Vk->D(2 * i)));
+    }
 }
 
 DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
 DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
 DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
 
-void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
 {
+    int i;
     VReg *Vd = (VReg *)vd;
     VReg *Vj = (VReg *)vj;
     VReg *Vk = (VReg *)vk;
+    int oprsz = simd_oprsz(desc);
 
-    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
-                          int128_makes64(Vk->D(1)));
+    for (i = 0; i < oprsz / 16; i++) {
+        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
+                              int128_makes64(Vk->D(2 * i + 1)));
+    }
 }
 
 DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/insn_trans/trans_vec.c.inc
index b2bc11fed1..8234d4670a 100644
--- a/target/loongarch/insn_trans/trans_vec.c.inc
+++ b/target/loongarch/insn_trans/trans_vec.c.inc
@@ -573,6 +573,10 @@ TRANS(vaddwev_h_b, LSX, gvec_vvv, MO_8, do_vaddwev_s)
 TRANS(vaddwev_w_h, LSX, gvec_vvv, MO_16, do_vaddwev_s)
 TRANS(vaddwev_d_w, LSX, gvec_vvv, MO_32, do_vaddwev_s)
 TRANS(vaddwev_q_d, LSX, gvec_vvv, MO_64, do_vaddwev_s)
+TRANS(xvaddwev_h_b, LASX, gvec_xxx, MO_8, do_vaddwev_s)
+TRANS(xvaddwev_w_h, LASX, gvec_xxx, MO_16, do_vaddwev_s)
+TRANS(xvaddwev_d_w, LASX, gvec_xxx, MO_32, do_vaddwev_s)
+TRANS(xvaddwev_q_d, LASX, gvec_xxx, MO_64, do_vaddwev_s)
 
 static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
 {
@@ -652,6 +656,11 @@ TRANS(vaddwod_h_b, LSX, gvec_vvv, MO_8, do_vaddwod_s)
 TRANS(vaddwod_w_h, LSX, gvec_vvv, MO_16, do_vaddwod_s)
 TRANS(vaddwod_d_w, LSX, gvec_vvv, MO_32, do_vaddwod_s)
 TRANS(vaddwod_q_d, LSX, gvec_vvv, MO_64, do_vaddwod_s)
+TRANS(xvaddwod_h_b, LASX, gvec_xxx, MO_8, do_vaddwod_s)
+TRANS(xvaddwod_w_h, LASX, gvec_xxx, MO_16, do_vaddwod_s)
+TRANS(xvaddwod_d_w, LASX, gvec_xxx, MO_32, do_vaddwod_s)
+TRANS(xvaddwod_q_d, LASX, gvec_xxx, MO_64, do_vaddwod_s)
+
 
 static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -735,6 +744,10 @@ TRANS(vsubwev_h_b, LSX, gvec_vvv, MO_8, do_vsubwev_s)
 TRANS(vsubwev_w_h, LSX, gvec_vvv, MO_16, do_vsubwev_s)
 TRANS(vsubwev_d_w, LSX, gvec_vvv, MO_32, do_vsubwev_s)
 TRANS(vsubwev_q_d, LSX, gvec_vvv, MO_64, do_vsubwev_s)
+TRANS(xvsubwev_h_b, LASX, gvec_xxx, MO_8, do_vsubwev_s)
+TRANS(xvsubwev_w_h, LASX, gvec_xxx, MO_16, do_vsubwev_s)
+TRANS(xvsubwev_d_w, LASX, gvec_xxx, MO_32, do_vsubwev_s)
+TRANS(xvsubwev_q_d, LASX, gvec_xxx, MO_64, do_vsubwev_s)
 
 static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -814,6 +827,10 @@ TRANS(vsubwod_h_b, LSX, gvec_vvv, MO_8, do_vsubwod_s)
 TRANS(vsubwod_w_h, LSX, gvec_vvv, MO_16, do_vsubwod_s)
 TRANS(vsubwod_d_w, LSX, gvec_vvv, MO_32, do_vsubwod_s)
 TRANS(vsubwod_q_d, LSX, gvec_vvv, MO_64, do_vsubwod_s)
+TRANS(xvsubwod_h_b, LASX, gvec_xxx, MO_8, do_vsubwod_s)
+TRANS(xvsubwod_w_h, LASX, gvec_xxx, MO_16, do_vsubwod_s)
+TRANS(xvsubwod_d_w, LASX, gvec_xxx, MO_32, do_vsubwod_s)
+TRANS(xvsubwod_q_d, LASX, gvec_xxx, MO_64, do_vsubwod_s)
 
 static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -889,6 +906,10 @@ TRANS(vaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vaddwev_u)
 TRANS(vaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vaddwev_u)
 TRANS(vaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vaddwev_u)
 TRANS(vaddwev_q_du, LSX, gvec_vvv, MO_64, do_vaddwev_u)
+TRANS(xvaddwev_h_bu, LASX, gvec_xxx, MO_8, do_vaddwev_u)
+TRANS(xvaddwev_w_hu, LASX, gvec_xxx, MO_16, do_vaddwev_u)
+TRANS(xvaddwev_d_wu, LASX, gvec_xxx, MO_32, do_vaddwev_u)
+TRANS(xvaddwev_q_du, LASX, gvec_xxx, MO_64, do_vaddwev_u)
 
 static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -968,6 +989,10 @@ TRANS(vaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vaddwod_u)
 TRANS(vaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vaddwod_u)
 TRANS(vaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vaddwod_u)
 TRANS(vaddwod_q_du, LSX, gvec_vvv, MO_64, do_vaddwod_u)
+TRANS(xvaddwod_h_bu, LASX, gvec_xxx, MO_8, do_vaddwod_u)
+TRANS(xvaddwod_w_hu, LASX, gvec_xxx, MO_16, do_vaddwod_u)
+TRANS(xvaddwod_d_wu, LASX, gvec_xxx, MO_32, do_vaddwod_u)
+TRANS(xvaddwod_q_du, LASX, gvec_xxx, MO_64, do_vaddwod_u)
 
 static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -1043,6 +1068,10 @@ TRANS(vsubwev_h_bu, LSX, gvec_vvv, MO_8, do_vsubwev_u)
 TRANS(vsubwev_w_hu, LSX, gvec_vvv, MO_16, do_vsubwev_u)
 TRANS(vsubwev_d_wu, LSX, gvec_vvv, MO_32, do_vsubwev_u)
 TRANS(vsubwev_q_du, LSX, gvec_vvv, MO_64, do_vsubwev_u)
+TRANS(xvsubwev_h_bu, LASX, gvec_xxx, MO_8, do_vsubwev_u)
+TRANS(xvsubwev_w_hu, LASX, gvec_xxx, MO_16, do_vsubwev_u)
+TRANS(xvsubwev_d_wu, LASX, gvec_xxx, MO_32, do_vsubwev_u)
+TRANS(xvsubwev_q_du, LASX, gvec_xxx, MO_64, do_vsubwev_u)
 
 static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -1122,6 +1151,10 @@ TRANS(vsubwod_h_bu, LSX, gvec_vvv, MO_8, do_vsubwod_u)
 TRANS(vsubwod_w_hu, LSX, gvec_vvv, MO_16, do_vsubwod_u)
 TRANS(vsubwod_d_wu, LSX, gvec_vvv, MO_32, do_vsubwod_u)
 TRANS(vsubwod_q_du, LSX, gvec_vvv, MO_64, do_vsubwod_u)
+TRANS(xvsubwod_h_bu, LASX, gvec_xxx, MO_8, do_vsubwod_u)
+TRANS(xvsubwod_w_hu, LASX, gvec_xxx, MO_16, do_vsubwod_u)
+TRANS(xvsubwod_d_wu, LASX, gvec_xxx, MO_32, do_vsubwod_u)
+TRANS(xvsubwod_q_du, LASX, gvec_xxx, MO_64, do_vsubwod_u)
 
 static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -1205,6 +1238,10 @@ TRANS(vaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwev_u_s)
 TRANS(vaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwev_u_s)
 TRANS(vaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwev_u_s)
 TRANS(vaddwev_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwev_u_s)
+TRANS(xvaddwev_h_bu_b, LASX, gvec_xxx, MO_8, do_vaddwev_u_s)
+TRANS(xvaddwev_w_hu_h, LASX, gvec_xxx, MO_16, do_vaddwev_u_s)
+TRANS(xvaddwev_d_wu_w, LASX, gvec_xxx, MO_32, do_vaddwev_u_s)
+TRANS(xvaddwev_q_du_d, LASX, gvec_xxx, MO_64, do_vaddwev_u_s)
 
 static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
 {
@@ -1285,6 +1322,10 @@ TRANS(vaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwod_u_s)
 TRANS(vaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwod_u_s)
 TRANS(vaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwod_u_s)
 TRANS(vaddwod_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwod_u_s)
+TRANS(xvaddwod_h_bu_b, LASX, gvec_xxx, MO_8, do_vaddwod_u_s)
+TRANS(xvaddwod_w_hu_h, LASX, gvec_xxx, MO_16, do_vaddwod_u_s)
+TRANS(xvaddwod_d_wu_w, LASX, gvec_xxx, MO_32, do_vaddwod_u_s)
+TRANS(xvaddwod_q_du_d, LASX, gvec_xxx, MO_64, do_vaddwod_u_s)
 
 static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                     void (*gen_shr_vec)(unsigned, TCGv_vec,
-- 
2.39.1