[PATCH v2 08/14] tcg/riscv: Implement vector cmp ops

Posted by LIU Zhiwei 2 months, 3 weeks ago
From: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>

1. Address immediate value constraints in RISC-V Vector Extension 1.0 for
   comparison instructions.

2. Extend comparison results from mask registers to SEW-width elements,
   following the recommendations in the RISC-V Instruction Set Manual,
   Volume I (version 20240411).

This aligns with TCG's cmp_vec behavior by expanding compare results to
full element width: all 1s for true, all 0s for false.
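
As an illustration, the expansion for an EQ compare at SEW = 8 boils down
to a sequence like the following (a hand-written sketch of the emitted
pattern; register choices are illustrative):

    vsetvli    t0, zero, e8, m1, ta, ma  # select SEW = 8
    vmseq.vv   v0, v1, v2                # v0.mask[i] = (v1[i] == v2[i])
    vmv.v.i    v3, 0                     # start from all-zero elements
    vmerge.vim v3, v3, -1, v0            # true lanes -> 0xff, false -> 0x00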

Signed-off-by: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
Reviewed-by: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
---
 tcg/riscv/tcg-target-con-set.h |   6 +-
 tcg/riscv/tcg-target.c.inc     | 240 +++++++++++++++++++++++++++++++++
 tcg/riscv/tcg-target.opc.h     |   5 +
 3 files changed, 250 insertions(+), 1 deletion(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index 7277cb9af8..6c9ad5188b 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -21,7 +21,11 @@ C_O1_I2(r, rZ, rZ)
 C_N1_I2(r, r, rM)
 C_O1_I4(r, r, rI, rM, rM)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
+C_O0_I1(v)
+C_O0_I2(v, v)
 C_O0_I2(v, r)
-C_O0_I2(v, vK)
 C_O1_I1(v, r)
 C_O1_I1(v, v)
+C_O1_I2(v, v, v)
+C_O1_I2(v, v, vi)
+C_O1_I2(v, v, vK)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index fde4e71260..1e8c0fb031 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -312,6 +312,9 @@ typedef enum {
     OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
     OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
 
+    OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
+    OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
+
     OPC_VADD_VV = 0x57 | V_OPIVV,
     OPC_VSUB_VV = 0x8000057 | V_OPIVV,
     OPC_VAND_VV = 0x24000057 | V_OPIVV,
@@ -319,6 +322,29 @@ typedef enum {
     OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
     OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
 
+    OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
+    OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
+    OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
+    OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
+    OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
+    OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
+
+    OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
+    OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
+    OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
+    OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
+    OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
+    OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
+    OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
+    OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
+
+    OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
+    OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
+    OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
+    OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
+    OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
+    OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
+
     OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
     OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
     OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
@@ -595,6 +621,12 @@ static void tcg_out_opc_vec_config(TCGContext *s, RISCVInsn opc,
 #define tcg_out_opc_vi(s, opc, vd, vs2, imm, vm) \
     tcg_out_opc_reg_vec_i(s, opc, vd, imm, vs2, vm);
 
+#define tcg_out_opc_vim_mask(s, opc, vd, vs2, imm) \
+    tcg_out_opc_reg_vec_i(s, opc, vd, imm, vs2, false);
+
+#define tcg_out_opc_vvm_mask(s, opc, vd, vs2, vs1) \
+    tcg_out_opc_reg_vec(s, opc, vd, vs1, vs2, false);
+
 #define tcg_out_opc_vconfig(s, opc, rd, avl, vtypei) \
     tcg_out_opc_vec_config(s, opc, rd, avl, vtypei);
 
@@ -1139,6 +1171,101 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
     tcg_out_opc_branch(s, op, arg1, arg2, 0);
 }
 
+static const struct {
+    RISCVInsn op;
+    bool swap;
+} tcg_cmpcond_to_rvv_vv[] = {
+    [TCG_COND_EQ] =  { OPC_VMSEQ_VV,  false },
+    [TCG_COND_NE] =  { OPC_VMSNE_VV,  false },
+    [TCG_COND_LT] =  { OPC_VMSLT_VV,  false },
+    [TCG_COND_GE] =  { OPC_VMSLE_VV,  true  },
+    [TCG_COND_GT] =  { OPC_VMSLT_VV,  true  },
+    [TCG_COND_LE] =  { OPC_VMSLE_VV,  false },
+    [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
+    [TCG_COND_GEU] = { OPC_VMSLEU_VV, true  },
+    [TCG_COND_GTU] = { OPC_VMSLTU_VV, true  },
+    [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
+};
+
+static const struct {
+    RISCVInsn op;
+    bool invert;
+}  tcg_cmpcond_to_rvv_vx[] = {
+    [TCG_COND_EQ]  = { OPC_VMSEQ_VX,  false },
+    [TCG_COND_NE]  = { OPC_VMSNE_VX,  false },
+    [TCG_COND_GT]  = { OPC_VMSGT_VX,  false },
+    [TCG_COND_LE]  = { OPC_VMSLE_VX,  false },
+    [TCG_COND_LT]  = { OPC_VMSLT_VX,  false },
+    [TCG_COND_LTU] = { OPC_VMSLTU_VX, false },
+    [TCG_COND_GTU] = { OPC_VMSGTU_VX, false },
+    [TCG_COND_LEU] = { OPC_VMSLEU_VX, false },
+    [TCG_COND_GE]  = { OPC_VMSLT_VX,  true  },
+    [TCG_COND_GEU] = { OPC_VMSLTU_VX, true  },
+};
+
+static const struct {
+    RISCVInsn op;
+    int min;
+    int max;
+    bool adjust;
+}  tcg_cmpcond_to_rvv_vi[] = {
+    [TCG_COND_EQ]  = { OPC_VMSEQ_VI,  -16, 15, false },
+    [TCG_COND_NE]  = { OPC_VMSNE_VI,  -16, 15, false },
+    [TCG_COND_GT]  = { OPC_VMSGT_VI,  -16, 15, false },
+    [TCG_COND_LE]  = { OPC_VMSLE_VI,  -16, 15, false },
+    [TCG_COND_LT]  = { OPC_VMSLE_VI,  -15, 16, true  },
+    [TCG_COND_GE]  = { OPC_VMSGT_VI,  -15, 16, true  },
+    [TCG_COND_LEU] = { OPC_VMSLEU_VI,   0, 15, false },
+    [TCG_COND_GTU] = { OPC_VMSGTU_VI,   0, 15, false },
+    [TCG_COND_LTU] = { OPC_VMSLEU_VI,   1, 16, true  },
+    [TCG_COND_GEU] = { OPC_VMSGTU_VI,   1, 16, true  },
+};
+
+static void tcg_out_cmp_vec_vv(TCGContext *s, TCGCond cond,
+                                      TCGReg arg1, TCGReg arg2)
+{
+    RISCVInsn op;
+
+    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vv));
+    op = tcg_cmpcond_to_rvv_vv[cond].op;
+    tcg_debug_assert(op != 0);
+
+    tcg_out_opc_vv(s, op, TCG_REG_V0, arg1, arg2, true);
+}
+
+static void tcg_out_cmp_vec_vx(TCGContext *s, TCGCond cond,
+                                      TCGReg arg1, TCGReg arg2)
+{
+    RISCVInsn op;
+
+    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vx));
+    op = tcg_cmpcond_to_rvv_vx[cond].op;
+    tcg_debug_assert(op != 0);
+
+    tcg_out_opc_vx(s, op, TCG_REG_V0, arg1, arg2, true);
+}
+
+static bool tcg_vec_cmp_can_do_vi(TCGCond cond, int64_t arg)
+{
+    signed imm_min, imm_max;
+
+    imm_min = tcg_cmpcond_to_rvv_vi[cond].min;
+    imm_max = tcg_cmpcond_to_rvv_vi[cond].max;
+    return (arg >= imm_min && arg <= imm_max);
+}
+
+static void tcg_out_cmp_vec_vi(TCGContext *s, TCGCond cond, TCGReg arg1,
+                               tcg_target_long arg2)
+{
+    RISCVInsn op;
+
+    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vi));
+    op = tcg_cmpcond_to_rvv_vi[cond].op;
+    tcg_debug_assert(op != 0);
+
+    tcg_out_opc_vi(s, op, TCG_REG_V0, arg1, arg2, true);
+}
+
 #define SETCOND_INV    TCG_TARGET_NB_REGS
 #define SETCOND_NEZ    (SETCOND_INV << 1)
 #define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
@@ -2267,6 +2394,28 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         riscv_set_vec_config_vl(s, type);
         tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1, true);
         break;
+    case INDEX_op_rvv_cmp_vx:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_out_cmp_vec_vx(s, a2, a0, a1);
+        break;
+    case INDEX_op_rvv_cmp_vi:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_debug_assert(tcg_vec_cmp_can_do_vi(a2, a1));
+        tcg_out_cmp_vec_vi(s, a2, a0, a1);
+        break;
+    case INDEX_op_rvv_cmp_vv:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_out_cmp_vec_vv(s, a2, a0, a1);
+        break;
+    case INDEX_op_rvv_merge_vec:
+        if (const_args[2]) {
+            /* vd[i] = v0.mask[i] ? imm : vs2[i] */
+            tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, a0, a1, a2);
+        } else {
+            /* vd[i] = v0.mask[i] ? vs1[i] : vs2[i] */
+            tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, a0, a1, a2);
+        }
+        break;
     case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov.  */
     case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec.  */
     default:
@@ -2274,13 +2423,92 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
+static void expand_vec_cmp_vv(TCGType type, unsigned vece,
+                              TCGv_vec v1, TCGv_vec v2, TCGCond cond)
+{
+    if (tcg_cmpcond_to_rvv_vv[cond].swap) {
+        vec_gen_3(INDEX_op_rvv_cmp_vv, type, vece,
+                  tcgv_vec_arg(v2), tcgv_vec_arg(v1), cond);
+    } else {
+        vec_gen_3(INDEX_op_rvv_cmp_vv, type, vece,
+                  tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
+    }
+}
+
+static bool expand_vec_cmp_vi(TCGType type, unsigned vece,
+                              TCGv_vec v1, TCGArg a2, TCGCond cond)
+{
+    int64_t arg2 = arg_temp(a2)->val;
+    bool invert = false;
+
+    if (!tcg_vec_cmp_can_do_vi(cond, arg2)) {
+        /* for cmp_vec_vx */
+        vec_gen_3(INDEX_op_rvv_cmp_vx, type, vece,
+                  tcgv_vec_arg(v1), tcgv_i64_arg(tcg_constant_i64(arg2)),
+                  cond);
+
+        tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vx));
+        invert = tcg_cmpcond_to_rvv_vx[cond].invert;
+    } else {
+        if (tcg_cmpcond_to_rvv_vi[cond].adjust) {
+            arg2 -= 1;
+        }
+        vec_gen_3(INDEX_op_rvv_cmp_vi, type, vece,
+                    tcgv_vec_arg(v1), arg2, cond);
+    }
+    return invert;
+}
+
+static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v1,
+                          TCGArg a2, TCGCond cond)
+{
+    bool invert = false;
+    TCGTemp *t1 = arg_temp(a2);
+
+    if (t1->kind == TEMP_CONST) {
+        invert = expand_vec_cmp_vi(type, vece, v1, a2, cond);
+    } else {
+        expand_vec_cmp_vv(type, vece, v1, temp_tcgv_vec(t1), cond);
+    }
+    return invert;
+}
+
 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
+    va_list va;
+    TCGv_vec v0, v1;
+    TCGArg a2, a3;
+
+    va_start(va, a0);
+    v0 = temp_tcgv_vec(arg_temp(a0));
+    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+    a2 = va_arg(va, TCGArg);
+
     switch (opc) {
+    case INDEX_op_cmp_vec:
+        {
+            a3 = va_arg(va, TCGArg);
+
+            /*
+             * Mask values should be widened into SEW-width elements,
+             * e.g. for SEW = 8 bits: 0b1 -> 0xff, 0b0 -> 0x00.
+             */
+            if (expand_vec_cmp_noinv(type, vece, v1, a2, a3)) {
+                vec_gen_3(INDEX_op_rvv_merge_vec, type, vece, tcgv_vec_arg(v0),
+                          tcgv_vec_arg(tcg_constant_vec(type, vece, -1)),
+                          tcgv_i64_arg(tcg_constant_i64(0)));
+            } else {
+                vec_gen_3(INDEX_op_rvv_merge_vec, type, vece, tcgv_vec_arg(v0),
+                          tcgv_vec_arg(tcg_constant_vec(type, vece, 0)),
+                          tcgv_i64_arg(tcg_constant_i64(-1)));
+            }
+        }
+        break;
     default:
         g_assert_not_reached();
     }
+    va_end(va);
 }
 
 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
@@ -2293,6 +2521,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_xor_vec:
     case INDEX_op_not_vec:
         return 1;
+    case INDEX_op_cmp_vec:
+        return -1;
     default:
         return 0;
     }
@@ -2451,6 +2681,16 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_or_vec:
     case INDEX_op_xor_vec:
         return C_O1_I2(v, v, v);
+    case INDEX_op_rvv_merge_vec:
+        return C_O1_I2(v, v, vK);
+    case INDEX_op_rvv_cmp_vi:
+        return C_O0_I1(v);
+    case INDEX_op_rvv_cmp_vx:
+        return C_O0_I2(v, r);
+    case INDEX_op_rvv_cmp_vv:
+        return C_O0_I2(v, v);
+    case INDEX_op_cmp_vec:
+        return C_O1_I2(v, v, vi);
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.opc.h b/tcg/riscv/tcg-target.opc.h
index b80b39e1e5..8eb0daf0a7 100644
--- a/tcg/riscv/tcg-target.opc.h
+++ b/tcg/riscv/tcg-target.opc.h
@@ -10,3 +10,8 @@
  * emitted by tcg_expand_vec_op.  For those familiar with GCC internals,
  * consider these to be UNSPEC with names.
  */
+
+DEF(rvv_cmp_vi, 0, 1, 2, IMPLVEC)
+DEF(rvv_cmp_vx, 0, 2, 1, IMPLVEC)
+DEF(rvv_cmp_vv, 0, 2, 1, IMPLVEC)
+DEF(rvv_merge_vec, 1, 2, 0, IMPLVEC)
-- 
2.43.0

Re: [PATCH v2 08/14] tcg/riscv: Implement vector cmp ops
Posted by Richard Henderson 2 months, 3 weeks ago
On 8/30/24 16:16, LIU Zhiwei wrote:
> From: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
> 
> 1. Address immediate value constraints in RISC-V Vector Extension 1.0 for
>    comparison instructions.
> 
> 2. Extend comparison results from mask registers to SEW-width elements,
>    following the recommendations in the RISC-V Instruction Set Manual,
>    Volume I (version 20240411).
> 
> This aligns with TCG's cmp_vec behavior by expanding compare results to
> full element width: all 1s for true, all 0s for false.
> 
> Signed-off-by: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
> Reviewed-by: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
> ---
>   tcg/riscv/tcg-target-con-set.h |   6 +-
>   tcg/riscv/tcg-target.c.inc     | 240 +++++++++++++++++++++++++++++++++
>   tcg/riscv/tcg-target.opc.h     |   5 +
>   3 files changed, 250 insertions(+), 1 deletion(-)
> 
> diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
> index 7277cb9af8..6c9ad5188b 100644
> --- a/tcg/riscv/tcg-target-con-set.h
> +++ b/tcg/riscv/tcg-target-con-set.h
> @@ -21,7 +21,11 @@ C_O1_I2(r, rZ, rZ)
>   C_N1_I2(r, r, rM)
>   C_O1_I4(r, r, rI, rM, rM)
>   C_O2_I4(r, r, rZ, rZ, rM, rM)
> +C_O0_I1(v)
> +C_O0_I2(v, v)
>   C_O0_I2(v, r)
> -C_O0_I2(v, vK)

Removing vK, just added in the previous patch.

> +static bool expand_vec_cmp_vi(TCGType type, unsigned vece,
> +                              TCGv_vec v1, TCGArg a2, TCGCond cond)
> +{
> +    int64_t arg2 = arg_temp(a2)->val;
> +    bool invert = false;
> +
> +    if (!tcg_vec_cmp_can_do_vi(cond, arg2)) {
...
> +static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v1,
> +                          TCGArg a2, TCGCond cond)
> +{
> +    bool invert = false;
> +    TCGTemp *t1 = arg_temp(a2);
> +
> +    if (t1->kind == TEMP_CONST) {
> +        invert = expand_vec_cmp_vi(type, vece, v1, a2, cond);

This will not work as you intend, primarily because vector constants are stored in 
expanded form. E.g. MO_8 1 is stored as 0x0101010101010101.

This is handled transparently *if* you use tcg_target_const_match instead.
Otherwise one must (sign)extract the low vece bits, and then double-check that the 
replication of the low bits matches the complete 'a2' value.
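
For instance, a minimal sketch of that check, using the existing
sextract64() and dup_const() helpers (the helper name and placement here
are hypothetical):

/*
 * Hypothetical helper: recover the per-element immediate from a
 * replicated vector constant, failing when the remaining elements
 * do not match the replicated low element.
 */
static bool vec_const_elem(unsigned vece, int64_t val, int64_t *elem)
{
    /* Sign-extract the low element: 8 << vece bits for MO_8..MO_64. */
    int64_t e = sextract64(val, 0, 8 << vece);

    if (dup_const(vece, e) != (uint64_t)val) {
        return false;
    }
    *elem = e;
    return true;
}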

I agree that we should be prepared for more vector x scalar operations, but that needs to 
happen during generic expansion rather than very late in the backend.

I think the first implementation should be simpler:

CONST('C', TCG_CT_CONST_CMP_VI)

tcg_target_const_match()
{
     ...
     if ((ct & TCG_CT_CONST_CMP_VI) &&
         val >= tcg_cmpcond_to_rvv_vi[cond].min &&
         val <= tcg_cmpcond_to_rvv_vi[cond].max) {
         return true;
     }
}

     case INDEX_op_cmp_vec:
         riscv_set_vec_config_vl_vece(s, type, vece);
         cond = args[3];
         if (c2) {
             tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, a0, a1,
                            a2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
         } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
             tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, a0, a2, a1);
         } else {
             tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, a0, a1, a2);
         }
         break;

This appears to not require any expansion in tcg_expand_vec_op at all.


r~

Re: [PATCH v2 08/14] tcg/riscv: Implement vector cmp ops
Posted by Richard Henderson 2 months, 3 weeks ago
On 9/2/24 23:45, Richard Henderson wrote:
> I think the first implementation should be simpler:
> 
> CONST('C', TCG_CT_CONST_CMP_VI)
> 
> tcg_target_const_match()
> {
>      ...
>      if ((ct & TCG_CT_CONST_CMP_VI) &&
>          val >= tcg_cmpcond_to_rvv_vi[cond].min &&
>          val <= tcg_cmpcond_to_rvv_vi[cond].max) {
>          return true;
>      }
> }
> 
>      case INDEX_op_cmp_vec:
>          riscv_set_vec_config_vl_vece(s, type, vece);
>          cond = args[3];
>          if (c2) {
>              tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, a0, a1,
>                             a2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
>          } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
>              tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, a0, a2, a1);
>          } else {
>              tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, a0, a1, a2);
>          }
>          break;
> 
> This appears to not require any expansion in tcg_expand_vec_op at all.

I knew I should have slept on that answer.
Of course you need expansion, because riscv cmp_vv produces a mask.

However, I think we should simply model this as INDEX_op_cmpsel_vec:

     case INDEX_op_cmpsel_vec:
           riscv_set_vec_config_vl_vece(s, type, vece);
           a3 = args[3];
           a4 = args[4];
           cond = args[5];
           /* Use only vmerge_vim if possible, by inverting the test. */
           if (const_args[4] && !const_args[3]) {
              cond = tcg_invert_cond(cond);
               a3 = a4;
               a4 = args[3];
               const_args[3] = true;
               const_args[4] = false;
           }
           /* Perform the comparison into V0 mask. */
           if (const_args[2]) {
               tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op,
                              TCG_REG_V0, a1,
                              a2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
           } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
               tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
                              TCG_REG_V0, a2, a1);
           } else {
               tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
                              TCG_REG_V0, a1, a2);
           }
           if (const_args[3]) {
               if (const_args[4]) {
                   tcg_out_opc_vi(s, OPC_VMV_V_I, a0, TCG_REG_V0, a4, true);
                   a4 = a0;
               }
               tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, a0, a3, a4);
           } else {
               tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, a0, a3, a4);
           }
           break;

Then INDEX_op_cmp_vec should be expanded to

     INDEX_op_cmpsel_vec a0, a1, a2, -1, 0, a3
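
Concretely, the backend's tcg_expand_vec_op could then be little more
than the following sketch, reusing the generic tcg_gen_cmpsel_vec()
helper (only cmp_vec shown):

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2;
    TCGCond cond;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    cond = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_cmp_vec:
        /* cmp_vec d,a,b,cc  ->  cmpsel_vec d,a,b,-1,0,cc */
        tcg_gen_cmpsel_vec(cond, vece, v0, v1, v2,
                           tcg_constant_vec(type, vece, -1),
                           tcg_constant_vec(type, vece, 0));
        break;
    default:
        g_assert_not_reached();
    }
}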


r~