[PATCH RESEND v5 14/57] target/loongarch: Implement xvadd/xvsub

Song Gao posted 57 patches 1 year, 2 months ago
Maintainers: Laurent Vivier <laurent@vivier.eu>, Song Gao <gaosong@loongson.cn>, Xiaojuan Yang <yangxiaojuan@loongson.cn>
There is a newer version of this series
[PATCH RESEND v5 14/57] target/loongarch: Implement xvadd/xvsub
Posted by Song Gao 1 year, 2 months ago
This patch includes:
- XVADD.{B/H/W/D/Q};
- XVSUB.{B/H/W/D/Q}.

Signed-off-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/loongarch/insns.decode               |  14 +++
 target/loongarch/disas.c                    |  23 +++++
 target/loongarch/translate.c                |   4 +
 target/loongarch/insn_trans/trans_vec.c.inc | 106 +++++++++++++-------
 4 files changed, 112 insertions(+), 35 deletions(-)

diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index c9c3bc2c73..bcc18fb6c5 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1296,3 +1296,17 @@ vstelm_d         0011 00010001 0 . ........ ..... .....   @vr_i8i1
 vstelm_w         0011 00010010 .. ........ ..... .....    @vr_i8i2
 vstelm_h         0011 0001010 ... ........ ..... .....    @vr_i8i3
 vstelm_b         0011 000110 .... ........ ..... .....    @vr_i8i4
+
+#
+# LoongArch LASX instructions
+#
+xvadd_b          0111 01000000 10100 ..... ..... .....    @vvv
+xvadd_h          0111 01000000 10101 ..... ..... .....    @vvv
+xvadd_w          0111 01000000 10110 ..... ..... .....    @vvv
+xvadd_d          0111 01000000 10111 ..... ..... .....    @vvv
+xvadd_q          0111 01010010 11010 ..... ..... .....    @vvv
+xvsub_b          0111 01000000 11000 ..... ..... .....    @vvv
+xvsub_h          0111 01000000 11001 ..... ..... .....    @vvv
+xvsub_w          0111 01000000 11010 ..... ..... .....    @vvv
+xvsub_d          0111 01000000 11011 ..... ..... .....    @vvv
+xvsub_q          0111 01010010 11011 ..... ..... .....    @vvv
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 5c402d944d..d8b62ba532 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1695,3 +1695,26 @@ INSN_LSX(vstelm_d,         vr_ii)
 INSN_LSX(vstelm_w,         vr_ii)
 INSN_LSX(vstelm_h,         vr_ii)
 INSN_LSX(vstelm_b,         vr_ii)
+
+#define INSN_LASX(insn, type)                               \
+static bool trans_##insn(DisasContext *ctx, arg_##type * a) \
+{                                                           \
+    output_##type ## _x(ctx, a, #insn);                     \
+    return true;                                            \
+}
+
+static void output_vvv_x(DisasContext *ctx, arg_vvv * a, const char *mnemonic)
+{
+    output(ctx, mnemonic, "x%d, x%d, x%d", a->vd, a->vj, a->vk);
+}
+
+INSN_LASX(xvadd_b,           vvv)
+INSN_LASX(xvadd_h,           vvv)
+INSN_LASX(xvadd_w,           vvv)
+INSN_LASX(xvadd_d,           vvv)
+INSN_LASX(xvadd_q,           vvv)
+INSN_LASX(xvsub_b,           vvv)
+INSN_LASX(xvsub_h,           vvv)
+INSN_LASX(xvsub_w,           vvv)
+INSN_LASX(xvsub_d,           vvv)
+INSN_LASX(xvsub_q,           vvv)
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
index 7f3958a1f4..10e2fe8ff6 100644
--- a/target/loongarch/translate.c
+++ b/target/loongarch/translate.c
@@ -124,6 +124,10 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
         ctx->vl = LSX_LEN;
     }
 
+    if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LASX)) {
+        ctx->vl = LASX_LEN;
+    }
+
     ctx->la64 = is_la64(env);
     ctx->va32 = (ctx->base.tb->flags & HW_FLAGS_VA32) != 0;
 
diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/insn_trans/trans_vec.c.inc
index a90afd3b82..47cf053e0a 100644
--- a/target/loongarch/insn_trans/trans_vec.c.inc
+++ b/target/loongarch/insn_trans/trans_vec.c.inc
@@ -208,6 +208,16 @@ static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop,
     return gvec_vvv_vl(ctx, a, 16, mop, func);
 }
 
+static bool gvec_xxx(DisasContext *ctx, arg_vvv *a, MemOp mop,
+                     void (*func)(unsigned, uint32_t, uint32_t,
+                                  uint32_t, uint32_t, uint32_t))
+{
+    if (!check_vec(ctx, 32)) {
+        return true;
+    }
+
+    return gvec_vvv_vl(ctx, a, 32, mop, func);
+}
 
 static bool gvec_vv_vl(DisasContext *ctx, arg_vv *a,
                        uint32_t oprsz, MemOp mop,
@@ -279,47 +289,73 @@ TRANS(vadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_add)
 TRANS(vadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_add)
 TRANS(vadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_add)
 TRANS(vadd_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_add)
+TRANS(xvadd_b, LASX, gvec_xxx, MO_8, tcg_gen_gvec_add)
+TRANS(xvadd_h, LASX, gvec_xxx, MO_16, tcg_gen_gvec_add)
+TRANS(xvadd_w, LASX, gvec_xxx, MO_32, tcg_gen_gvec_add)
+TRANS(xvadd_d, LASX, gvec_xxx, MO_64, tcg_gen_gvec_add)
+
+static bool gen_vaddsub_q_vl(DisasContext *ctx, arg_vvv *a, uint32_t oprsz,
+                             void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
+                                          TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    int i;
+    TCGv_i64 rh, rl, ah, al, bh, bl;
+
+    rh = tcg_temp_new_i64();
+    rl = tcg_temp_new_i64();
+    ah = tcg_temp_new_i64();
+    al = tcg_temp_new_i64();
+    bh = tcg_temp_new_i64();
+    bl = tcg_temp_new_i64();
+
+    for (i = 0; i < oprsz / 16; i++) {
+        get_vreg64(ah, a->vj, 1 + i * 2);
+        get_vreg64(al, a->vj, i * 2);
+        get_vreg64(bh, a->vk, 1 + i * 2);
+        get_vreg64(bl, a->vk, i * 2);
+
+        func(rl, rh, al, ah, bl, bh);
 
-#define VADDSUB_Q(NAME)                                        \
-static bool trans_v## NAME ##_q(DisasContext *ctx, arg_vvv *a) \
-{                                                              \
-    TCGv_i64 rh, rl, ah, al, bh, bl;                           \
-                                                               \
-    if (!avail_LSX(ctx)) {                                     \
-        return false;                                          \
-    }                                                          \
-                                                               \
-    if (!check_vec(ctx, 16)) {                                 \
-        return true;                                           \
-    }                                                          \
-                                                               \
-    rh = tcg_temp_new_i64();                                   \
-    rl = tcg_temp_new_i64();                                   \
-    ah = tcg_temp_new_i64();                                   \
-    al = tcg_temp_new_i64();                                   \
-    bh = tcg_temp_new_i64();                                   \
-    bl = tcg_temp_new_i64();                                   \
-                                                               \
-    get_vreg64(ah, a->vj, 1);                                  \
-    get_vreg64(al, a->vj, 0);                                  \
-    get_vreg64(bh, a->vk, 1);                                  \
-    get_vreg64(bl, a->vk, 0);                                  \
-                                                               \
-    tcg_gen_## NAME ##2_i64(rl, rh, al, ah, bl, bh);           \
-                                                               \
-    set_vreg64(rh, a->vd, 1);                                  \
-    set_vreg64(rl, a->vd, 0);                                  \
-                                                               \
-    return true;                                               \
-}
-
-VADDSUB_Q(add)
-VADDSUB_Q(sub)
+        set_vreg64(rh, a->vd, 1 + i * 2);
+        set_vreg64(rl, a->vd, i * 2);
+    }
+    return true;
+}
+
+static bool gen_vaddsub_q(DisasContext *ctx, arg_vvv *a,
+                          void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
+                                       TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    if (!check_vec(ctx, 16)) {
+        return true;
+    }
+
+    return gen_vaddsub_q_vl(ctx, a, 16, func);
+}
+
+static bool gen_xvaddsub_q(DisasContext *ctx, arg_vvv *a,
+                           void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
+                                        TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    if (!check_vec(ctx, 32)) {
+        return true;
+    }
+    /* LASX is 256-bit: process both 128-bit halves (oprsz = 32). */
+    return gen_vaddsub_q_vl(ctx, a, 32, func);
+}
 
 TRANS(vsub_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sub)
 TRANS(vsub_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sub)
 TRANS(vsub_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sub)
 TRANS(vsub_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sub)
+TRANS(xvsub_b, LASX, gvec_xxx, MO_8, tcg_gen_gvec_sub)
+TRANS(xvsub_h, LASX, gvec_xxx, MO_16, tcg_gen_gvec_sub)
+TRANS(xvsub_w, LASX, gvec_xxx, MO_32, tcg_gen_gvec_sub)
+TRANS(xvsub_d, LASX, gvec_xxx, MO_64, tcg_gen_gvec_sub)
+
+TRANS(vadd_q, LSX, gen_vaddsub_q, tcg_gen_add2_i64)
+TRANS(vsub_q, LSX, gen_vaddsub_q, tcg_gen_sub2_i64)
+TRANS(xvadd_q, LASX, gen_xvaddsub_q, tcg_gen_add2_i64)
+TRANS(xvsub_q, LASX, gen_xvaddsub_q, tcg_gen_sub2_i64)
 
 TRANS(vaddi_bu, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_addi)
 TRANS(vaddi_hu, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_addi)
-- 
2.39.1
Re: [PATCH RESEND v5 14/57] target/loongarch: Implement xvadd/xvsub
Posted by Richard Henderson 1 year, 2 months ago
On 9/7/23 01:31, Song Gao wrote:
> --- a/target/loongarch/insn_trans/trans_vec.c.inc
> +++ b/target/loongarch/insn_trans/trans_vec.c.inc
> @@ -208,6 +208,16 @@ static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop,
>       return gvec_vvv_vl(ctx, a, 16, mop, func);
>   }
>   
> +static bool gvec_xxx(DisasContext *ctx, arg_vvv *a, MemOp mop,
> +                     void (*func)(unsigned, uint32_t, uint32_t,
> +                                  uint32_t, uint32_t, uint32_t))
> +{
> +    if (!check_vec(ctx, 32)) {
> +        return true;
> +    }
> +
> +    return gvec_vvv_vl(ctx, a, 32, mop, func);
> +}

You can move check_vec into gvec_vvv_vl, removing it from gvec_vvv.

> +static bool gen_vaddsub_q_vl(DisasContext *ctx, arg_vvv *a, uint32_t oprsz,
> +                             void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
> +                                          TCGv_i64, TCGv_i64, TCGv_i64))
> +{
> +    int i;
> +    TCGv_i64 rh, rl, ah, al, bh, bl;

Have check_vec here ...

> +static bool gen_vaddsub_q(DisasContext *ctx, arg_vvv *a,
> +                          void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
> +                                       TCGv_i64, TCGv_i64, TCGv_i64))
> +{
> +    if (!check_vec(ctx, 16)) {
> +        return true;
> +    }
> +
> +    return gen_vaddsub_q_vl(ctx, a, 16, func);
> +}
> +
> +static bool gen_xvaddsub_q(DisasContext *ctx, arg_vvv *a,
> +                           void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
> +                                        TCGv_i64, TCGv_i64, TCGv_i64))
> +{
> +    if (!check_vec(ctx, 32)) {
> +        return true;
> +    }
> +    return gen_vaddsub_q_vl(ctx, a, 16, func);
> +}

... instead of these two places.


r~
Re: [PATCH RESEND v5 14/57] target/loongarch: Implement xvadd/xvsub
Posted by gaosong 1 year, 2 months ago
在 2023/9/10 上午9:44, Richard Henderson 写道:
> On 9/7/23 01:31, Song Gao wrote:
>> --- a/target/loongarch/insn_trans/trans_vec.c.inc
>> +++ b/target/loongarch/insn_trans/trans_vec.c.inc
>> @@ -208,6 +208,16 @@ static bool gvec_vvv(DisasContext *ctx, arg_vvv 
>> *a, MemOp mop,
>>       return gvec_vvv_vl(ctx, a, 16, mop, func);
>>   }
>> +static bool gvec_xxx(DisasContext *ctx, arg_vvv *a, MemOp mop,
>> +                     void (*func)(unsigned, uint32_t, uint32_t,
>> +                                  uint32_t, uint32_t, uint32_t))
>> +{
>> +    if (!check_vec(ctx, 32)) {
>> +        return true;
>> +    }
>> +
>> +    return gvec_vvv_vl(ctx, a, 32, mop, func);
>> +}
> 
> You can move check_vec into gvec_vvv_vl, removing it from gvec_vvv.
> 
>> +static bool gen_vaddsub_q_vl(DisasContext *ctx, arg_vvv *a, uint32_t 
>> oprsz,
>> +                             void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
>> +                                          TCGv_i64, TCGv_i64, TCGv_i64))
>> +{
>> +    int i;
>> +    TCGv_i64 rh, rl, ah, al, bh, bl;
> 
> Have check_vec here ...
> 
>> +static bool gen_vaddsub_q(DisasContext *ctx, arg_vvv *a,
>> +                          void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
>> +                                       TCGv_i64, TCGv_i64, TCGv_i64))
>> +{
>> +    if (!check_vec(ctx, 16)) {
>> +        return true;
>> +    }
>> +
>> +    return gen_vaddsub_q_vl(ctx, a, 16, func);
>> +}
>> +
>> +static bool gen_xvaddsub_q(DisasContext *ctx, arg_vvv *a,
>> +                           void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
>> +                                        TCGv_i64, TCGv_i64, TCGv_i64))
>> +{
>> +    if (!check_vec(ctx, 32)) {
>> +        return true;
>> +    }
>> +    return gen_vaddsub_q_vl(ctx, a, 16, func);
>> +}
> 
> ... instead of these two places.
> 
> 
Ok, I will correct all similar patches.

Thanks.
Song Gao


Re: [PATCH RESEND v5 14/57] target/loongarch: Implement xvadd/xvsub
Posted by gaosong 1 year, 2 months ago
在 2023/9/7 下午4:31, Song Gao 写道:
> +static bool gen_xvaddsub_q(DisasContext *ctx, arg_vvv *a,
> +                           void (*func)(TCGv_i64, TCGv_i64, TCGv_i64,
> +                                        TCGv_i64, TCGv_i64, TCGv_i64))
> +{
> +    if (!check_vec(ctx, 32)) {
> +        return true;
> +    }
> +    return gen_vaddsub_q_vl(ctx, a, 16, func);
> +}
Typo,  16->32,  I will correct it on v6.

Thanks.
Song Gao