From: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
Signed-off-by: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
Reviewed-by: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
---
tcg/riscv/tcg-target.c.inc | 98 +++++++++++++++++++++++++++++++++++++-
tcg/riscv/tcg-target.h | 8 ++--
tcg/riscv/tcg-target.opc.h | 3 ++
3 files changed, 104 insertions(+), 5 deletions(-)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 31e161c5bc..6560d3381a 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -358,10 +358,13 @@ typedef enum {
     OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
 
     OPC_VSLL_VV = 0x94000057 | V_OPIVV,
+    OPC_VSLL_VI = 0x94000057 | V_OPIVI,
     OPC_VSLL_VX = 0x94000057 | V_OPIVX,
     OPC_VSRL_VV = 0xa0000057 | V_OPIVV,
+    OPC_VSRL_VI = 0xa0000057 | V_OPIVI,
     OPC_VSRL_VX = 0xa0000057 | V_OPIVX,
     OPC_VSRA_VV = 0xa4000057 | V_OPIVV,
+    OPC_VSRA_VI = 0xa4000057 | V_OPIVI,
     OPC_VSRA_VX = 0xa4000057 | V_OPIVX,
 
     OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
@@ -2477,6 +2480,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         riscv_set_vec_config_vl_vece(s, type, vece);
         tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2, true);
         break;
+    case INDEX_op_rvv_shli_vec:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_out_opc_vi(s, OPC_VSLL_VI, a0, a1, a2, true);
+        break;
+    case INDEX_op_rvv_shri_vec:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_out_opc_vi(s, OPC_VSRL_VI, a0, a1, a2, true);
+        break;
+    case INDEX_op_rvv_sari_vec:
+        riscv_set_vec_config_vl_vece(s, type, vece);
+        tcg_out_opc_vi(s, OPC_VSRA_VI, a0, a1, a2, true);
+        break;
     case INDEX_op_rvv_cmp_vx:
         riscv_set_vec_config_vl_vece(s, type, vece);
         tcg_out_cmp_vec_vx(s, a2, a0, a1);
@@ -2561,7 +2576,8 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
     va_list va;
-    TCGv_vec v0, v1;
+    TCGv_vec v0, v1, v2, t1;
+    TCGv_i32 t2;
     TCGArg a2, a3;
 
     va_start(va, a0);
@@ -2589,6 +2605,69 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
             }
         }
         break;
+    case INDEX_op_shli_vec:
+        if (a2 > 31) {
+            tcg_gen_shls_vec(vece, v0, v1, tcg_constant_i32(a2));
+        } else {
+            vec_gen_3(INDEX_op_rvv_shli_vec, type, vece, tcgv_vec_arg(v0),
+                      tcgv_vec_arg(v1), a2);
+        }
+        break;
+    case INDEX_op_shri_vec:
+        if (a2 > 31) {
+            tcg_gen_shrs_vec(vece, v0, v1, tcg_constant_i32(a2));
+        } else {
+            vec_gen_3(INDEX_op_rvv_shri_vec, type, vece, tcgv_vec_arg(v0),
+                      tcgv_vec_arg(v1), a2);
+        }
+        break;
+    case INDEX_op_sari_vec:
+        if (a2 > 31) {
+            tcg_gen_sars_vec(vece, v0, v1, tcg_constant_i32(a2));
+        } else {
+            vec_gen_3(INDEX_op_rvv_sari_vec, type, vece, tcgv_vec_arg(v0),
+                      tcgv_vec_arg(v1), a2);
+        }
+        break;
+    case INDEX_op_rotli_vec:
+        t1 = tcg_temp_new_vec(type);
+        tcg_gen_shli_vec(vece, t1, v1, a2);
+        tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - a2);
+        tcg_gen_or_vec(vece, v0, v0, t1);
+        tcg_temp_free_vec(t1);
+        break;
+    case INDEX_op_rotls_vec:
+        t1 = tcg_temp_new_vec(type);
+        t2 = tcg_temp_new_i32();
+        tcg_gen_neg_i32(t2, temp_tcgv_i32(arg_temp(a2)));
+        tcg_gen_shrs_vec(vece, v0, v1, t2);
+        tcg_gen_shls_vec(vece, t1, v1, temp_tcgv_i32(arg_temp(a2)));
+        tcg_gen_or_vec(vece, v0, v0, t1);
+        tcg_temp_free_vec(t1);
+        tcg_temp_free_i32(t2);
+        break;
+    case INDEX_op_rotlv_vec:
+        v2 = temp_tcgv_vec(arg_temp(a2));
+        t1 = tcg_temp_new_vec(type);
+        tcg_gen_neg_vec(vece, t1, v2);
+        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
+        tcg_gen_or_vec(vece, v0, v0, t1);
+        tcg_temp_free_vec(t1);
+        break;
+    case INDEX_op_rotrv_vec:
+        v2 = temp_tcgv_vec(arg_temp(a2));
+        t1 = tcg_temp_new_vec(type);
+        tcg_gen_neg_vec(vece, t1, v2);
+        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(v0),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
+        tcg_gen_or_vec(vece, v0, v0, t1);
+        tcg_temp_free_vec(t1);
+        break;
     default:
         g_assert_not_reached();
     }
@@ -2622,6 +2701,13 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_sarv_vec:
         return 1;
     case INDEX_op_cmp_vec:
+    case INDEX_op_shri_vec:
+    case INDEX_op_shli_vec:
+    case INDEX_op_sari_vec:
+    case INDEX_op_rotls_vec:
+    case INDEX_op_rotlv_vec:
+    case INDEX_op_rotrv_vec:
+    case INDEX_op_rotli_vec:
         return -1;
     default:
         return 0;
@@ -2775,6 +2861,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
         return C_O1_I1(v, r);
     case INDEX_op_neg_vec:
     case INDEX_op_not_vec:
+    case INDEX_op_rotli_vec:
+    case INDEX_op_shli_vec:
+    case INDEX_op_shri_vec:
+    case INDEX_op_sari_vec:
+    case INDEX_op_rvv_shli_vec:
+    case INDEX_op_rvv_shri_vec:
+    case INDEX_op_rvv_sari_vec:
         return C_O1_I1(v, v);
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
@@ -2793,10 +2886,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shlv_vec:
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
+    case INDEX_op_rotlv_vec:
+    case INDEX_op_rotrv_vec:
         return C_O1_I2(v, v, v);
     case INDEX_op_shls_vec:
     case INDEX_op_shrs_vec:
     case INDEX_op_sars_vec:
+    case INDEX_op_rotls_vec:
         return C_O1_I2(v, v, r);
     case INDEX_op_rvv_merge_vec:
         return C_O1_I2(v, v, vK);
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index 41c6c446e8..eb5129a976 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -154,10 +154,10 @@ typedef enum {
 #define TCG_TARGET_HAS_not_vec 1
 #define TCG_TARGET_HAS_neg_vec 1
 #define TCG_TARGET_HAS_abs_vec 0
-#define TCG_TARGET_HAS_roti_vec 0
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
-#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_roti_vec -1
+#define TCG_TARGET_HAS_rots_vec -1
+#define TCG_TARGET_HAS_rotv_vec -1
+#define TCG_TARGET_HAS_shi_vec -1
 #define TCG_TARGET_HAS_shs_vec 1
 #define TCG_TARGET_HAS_shv_vec 1
 #define TCG_TARGET_HAS_mul_vec 1
diff --git a/tcg/riscv/tcg-target.opc.h b/tcg/riscv/tcg-target.opc.h
index 8eb0daf0a7..9f31286025 100644
--- a/tcg/riscv/tcg-target.opc.h
+++ b/tcg/riscv/tcg-target.opc.h
@@ -15,3 +15,6 @@ DEF(rvv_cmp_vi, 0, 1, 2, IMPLVEC)
 DEF(rvv_cmp_vx, 0, 2, 1, IMPLVEC)
 DEF(rvv_cmp_vv, 0, 2, 1, IMPLVEC)
 DEF(rvv_merge_vec, 1, 2, 0, IMPLVEC)
+DEF(rvv_shli_vec, 1, 1, 1, IMPLVEC)
+DEF(rvv_shri_vec, 1, 1, 1, IMPLVEC)
+DEF(rvv_sari_vec, 1, 1, 1, IMPLVEC)
--
2.43.0
On 8/29/24 23:16, LIU Zhiwei wrote:
> @@ -2589,6 +2605,69 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
>              }
>          }
>          break;
> +    case INDEX_op_shli_vec:
> +        if (a2 > 31) {
> +            tcg_gen_shls_vec(vece, v0, v1, tcg_constant_i32(a2));
> +        } else {
> +            vec_gen_3(INDEX_op_rvv_shli_vec, type, vece, tcgv_vec_arg(v0),
> +                      tcgv_vec_arg(v1), a2);
> +        }
> +        break;
> +    case INDEX_op_shri_vec:
> +        if (a2 > 31) {
> +            tcg_gen_shrs_vec(vece, v0, v1, tcg_constant_i32(a2));
> +        } else {
> +            vec_gen_3(INDEX_op_rvv_shri_vec, type, vece, tcgv_vec_arg(v0),
> +                      tcgv_vec_arg(v1), a2);
> +        }
> +        break;
> +    case INDEX_op_sari_vec:
> +        if (a2 > 31) {
> +            tcg_gen_sars_vec(vece, v0, v1, tcg_constant_i32(a2));
> +        } else {
> +            vec_gen_3(INDEX_op_rvv_sari_vec, type, vece, tcgv_vec_arg(v0),
> +                      tcgv_vec_arg(v1), a2);
> +        }
> +        break;
> +    case INDEX_op_rotli_vec:
> +        t1 = tcg_temp_new_vec(type);
> +        tcg_gen_shli_vec(vece, t1, v1, a2);
> +        tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - a2);
> +        tcg_gen_or_vec(vece, v0, v0, t1);
> +        tcg_temp_free_vec(t1);
> +        break;
> +    case INDEX_op_rotls_vec:
> +        t1 = tcg_temp_new_vec(type);
> +        t2 = tcg_temp_new_i32();
> +        tcg_gen_neg_i32(t2, temp_tcgv_i32(arg_temp(a2)));
> +        tcg_gen_shrs_vec(vece, v0, v1, t2);
> +        tcg_gen_shls_vec(vece, t1, v1, temp_tcgv_i32(arg_temp(a2)));
> +        tcg_gen_or_vec(vece, v0, v0, t1);
> +        tcg_temp_free_vec(t1);
> +        tcg_temp_free_i32(t2);
> +        break;
I'm trying to work out how much benefit there is here of expanding these early, as opposed
to simply using TCG_REG_TMP0 when the immediate doesn't fit, or for rotls_vec negation.
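For the rotls_vec negation, that might come out something like this — a sketch only, using TCG_REG_V0 as the vector scratch and relying on RVV vector shifts consuming just the low log2(SEW) bits of the scalar, so the negated count works; OPC_SUBW and tcg_out_opc_reg are assumed from the rest of this backend:

    case INDEX_op_rotls_vec:
        riscv_set_vec_config_vl_vece(s, type, vece);
        /* TMP0 = -a2; the vector shift reads only the low log2(SEW) bits */
        tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2);
        /* V0 = a1 >> -a2 (logical) */
        tcg_out_opc_vx(s, OPC_VSRL_VX, TCG_REG_V0, a1, TCG_REG_TMP0, true);
        /* a0 = a1 << a2 */
        tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2, true);
        /* a0 |= V0 */
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0, true);
        break;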
> +    case INDEX_op_rotlv_vec:
> +        v2 = temp_tcgv_vec(arg_temp(a2));
> +        t1 = tcg_temp_new_vec(type);
> +        tcg_gen_neg_vec(vece, t1, v2);
> +        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(t1),
> +                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
> +        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
> +                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
> +        tcg_gen_or_vec(vece, v0, v0, t1);
> +        tcg_temp_free_vec(t1);
> +        break;
> +    case INDEX_op_rotrv_vec:
> +        v2 = temp_tcgv_vec(arg_temp(a2));
> +        t1 = tcg_temp_new_vec(type);
> +        tcg_gen_neg_vec(vece, t1, v2);
> +        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
> +                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
> +        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(v0),
> +                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
> +        tcg_gen_or_vec(vece, v0, v0, t1);
> +        tcg_temp_free_vec(t1);
> +        break;
And here we can use TCG_REG_V0 as the temporary, both for negation and shift intermediate.
vrsub_vi V0, a2, 0
vshlv_vv V0, a1, V0
vshrv_vv a0, a1, a2
vor_vv a0, a0, V0
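
In backend C that sequence might look roughly as below — a sketch, assuming an OPC_VRSUB_VI encoding is (or gets) defined alongside the other OPC_* values, and reusing the patch's emitters:

    case INDEX_op_rotrv_vec:
        riscv_set_vec_config_vl_vece(s, type, vece);
        /* V0 = -a2, via vrsub.vi V0, a2, 0 */
        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0, true);
        /* V0 = a1 << V0; only the low log2(SEW) bits of the count are used */
        tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0, true);
        /* a0 = a1 >> a2 */
        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2, true);
        /* a0 |= V0 */
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0, true);
        break;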
r~
On 2024/9/3 23:15, Richard Henderson wrote:
> On 8/29/24 23:16, LIU Zhiwei wrote:
>> @@ -2589,6 +2605,69 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
>>              }
>>          }
>>          break;
>> +    case INDEX_op_shli_vec:
>> +        if (a2 > 31) {
>> +            tcg_gen_shls_vec(vece, v0, v1, tcg_constant_i32(a2));
>> +        } else {
>> +            vec_gen_3(INDEX_op_rvv_shli_vec, type, vece, tcgv_vec_arg(v0),
>> +                      tcgv_vec_arg(v1), a2);
>> +        }
>> +        break;
>> +    case INDEX_op_shri_vec:
>> +        if (a2 > 31) {
>> +            tcg_gen_shrs_vec(vece, v0, v1, tcg_constant_i32(a2));
>> +        } else {
>> +            vec_gen_3(INDEX_op_rvv_shri_vec, type, vece, tcgv_vec_arg(v0),
>> +                      tcgv_vec_arg(v1), a2);
>> +        }
>> +        break;
>> +    case INDEX_op_sari_vec:
>> +        if (a2 > 31) {
>> +            tcg_gen_sars_vec(vece, v0, v1, tcg_constant_i32(a2));
>> +        } else {
>> +            vec_gen_3(INDEX_op_rvv_sari_vec, type, vece, tcgv_vec_arg(v0),
>> +                      tcgv_vec_arg(v1), a2);
>> +        }
>> +        break;
>> +    case INDEX_op_rotli_vec:
>> +        t1 = tcg_temp_new_vec(type);
>> +        tcg_gen_shli_vec(vece, t1, v1, a2);
>> +        tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - a2);
>> +        tcg_gen_or_vec(vece, v0, v0, t1);
>> +        tcg_temp_free_vec(t1);
>> +        break;
>> +    case INDEX_op_rotls_vec:
>> +        t1 = tcg_temp_new_vec(type);
>> +        t2 = tcg_temp_new_i32();
>> +        tcg_gen_neg_i32(t2, temp_tcgv_i32(arg_temp(a2)));
>> +        tcg_gen_shrs_vec(vece, v0, v1, t2);
>> +        tcg_gen_shls_vec(vece, t1, v1, temp_tcgv_i32(arg_temp(a2)));
>> +        tcg_gen_or_vec(vece, v0, v0, t1);
>> +        tcg_temp_free_vec(t1);
>> +        tcg_temp_free_i32(t2);
>> +        break;
>
> I'm trying to work out how much benefit there is here of expanding
> these early, as opposed to simply using TCG_REG_TMP0 when the
> immediate doesn't fit,
We find that for rotli, if we don't expand it early, the backend just duplicates the code
from the INDEX_op_shli_vec and INDEX_op_shri_vec implementations:
    case INDEX_op_rotli_vec:
        if (a2 > 31) {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, TCG_REG_TMP0, true);
        } else {
            tcg_out_opc_vi(s, OPC_VSLL_VI, TCG_REG_V0, a1, a2, true);
        }

        if (((8 << vece) - a2) > 31) {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, (8 << vece) - a2);
            tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0, true);
        } else {
            tcg_out_opc_vi(s, OPC_VSRL_VI, a0, a1, (8 << vece) - a2, true);
        }
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0, true);
        break;
Thus, I prefer to expand it early, at least for rotli_vec.
Thanks,
Zhiwei
> or for rotls_vec negation.
>
>> +    case INDEX_op_rotlv_vec:
>> +        v2 = temp_tcgv_vec(arg_temp(a2));
>> +        t1 = tcg_temp_new_vec(type);
>> +        tcg_gen_neg_vec(vece, t1, v2);
>> +        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(t1),
>> +                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
>> +        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
>> +                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
>> +        tcg_gen_or_vec(vece, v0, v0, t1);
>> +        tcg_temp_free_vec(t1);
>> +        break;
>> +    case INDEX_op_rotrv_vec:
>> +        v2 = temp_tcgv_vec(arg_temp(a2));
>> +        t1 = tcg_temp_new_vec(type);
>> +        tcg_gen_neg_vec(vece, t1, v2);
>> +        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
>> +                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
>> +        vec_gen_3(INDEX_op_shrv_vec, type, vece, tcgv_vec_arg(v0),
>> +                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
>> +        tcg_gen_or_vec(vece, v0, v0, t1);
>> +        tcg_temp_free_vec(t1);
>> +        break;
>
> And here we can use TCG_REG_V0 as the temporary, both for negation and
> shift intermediate.
>
> vrsub_vi V0, a2, 0
> vshlv_vv V0, a1, V0
> vshrv_vv a0, a1, a2
> vor_vv a0, a0, V0
>
>
> r~
On 9/4/24 08:25, LIU Zhiwei wrote:
>> I'm trying to work out how much benefit there is here of expanding these early, as
>> opposed to simply using TCG_REG_TMP0 when the immediate doesn't fit,
>
> We find that for rotli, if we don't expand it early, the backend just duplicates the code
> from the INDEX_op_shli_vec and INDEX_op_shri_vec implementations:
>
>     case INDEX_op_rotli_vec:
>         if (a2 > 31) {
>             tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, a2);
>             tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, TCG_REG_TMP0, true);
>         } else {
>             tcg_out_opc_vi(s, OPC_VSLL_VI, TCG_REG_V0, a1, a2, true);
>         }
>
>         if (((8 << vece) - a2) > 31) {
>             tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, (8 << vece) - a2);
>             tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0, true);
>         } else {
>             tcg_out_opc_vi(s, OPC_VSRL_VI, a0, a1, (8 << vece) - a2, true);
>         }
>         tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0, true);
>         break;
>
> Thus, I prefer to expand it early, at least for rotli_vec.
static void tcg_out_vshifti(TCGContext *s, RISCVInsn op_vi, RISCVInsn op_vx,
                            TCGReg dst, TCGReg src, unsigned imm)
{
    if (imm < 32) {
        tcg_out_opc_vi(s, op_vi, dst, src, imm);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
        tcg_out_opc_vx(s, op_vx, dst, src, TCG_REG_TMP0);
    }
}

    case INDEX_op_shli_vec:
        set_vconfig_vl_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
        break;

    case INDEX_op_rotli_vec:
        set_vconfig_vl_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
        a2 = -a2 & ((8 << vece) - 1);
        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
        break;
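
(For any non-zero a2 the masked form is the same rotate amount as before: e.g. with vece = MO_8 and a2 = 3, -a2 & ((8 << vece) - 1) = -3 & 7 = 5 = (8 << vece) - a2; and a2 = 0 maps to 0 rather than an out-of-range shift by 8 << vece.)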
r~
On 2024/9/5 3:05, Richard Henderson wrote:
> On 9/4/24 08:25, LIU Zhiwei wrote:
>>> I'm trying to work out how much benefit there is here of expanding
>>> these early, as opposed to simply using TCG_REG_TMP0 when the
>>> immediate doesn't fit,
>>
>> We find that for rotli, if we don't expand it early, the backend just
>> duplicates the code from the INDEX_op_shli_vec and INDEX_op_shri_vec
>> implementations:
>>
>>     case INDEX_op_rotli_vec:
>>         if (a2 > 31) {
>>             tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, a2);
>>             tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, TCG_REG_TMP0, true);
>>         } else {
>>             tcg_out_opc_vi(s, OPC_VSLL_VI, TCG_REG_V0, a1, a2, true);
>>         }
>>
>>         if (((8 << vece) - a2) > 31) {
>>             tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, (8 << vece) - a2);
>>             tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0, true);
>>         } else {
>>             tcg_out_opc_vi(s, OPC_VSRL_VI, a0, a1, (8 << vece) - a2, true);
>>         }
>>         tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0, true);
>>         break;
>>
>> Thus, I prefer to expand it early, at least for rotli_vec.
>
> static void tcg_out_vshifti(TCGContext *s, RISCVInsn op_vi, RISCVInsn op_vx,
>                             TCGReg dst, TCGReg src, unsigned imm)
> {
>     if (imm < 32) {
>         tcg_out_opc_vi(s, op_vi, dst, src, imm);
>     } else {
>         tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
>         tcg_out_opc_vx(s, op_vx, dst, src, TCG_REG_TMP0);
>     }
> }
>
>
Thanks for the guide.
>     case INDEX_op_shli_vec:
>         set_vconfig_vl_sew(s, type, vece);
>         tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
>         break;
>
>     case INDEX_op_rotli_vec:
>         set_vconfig_vl_sew(s, type, vece);
>         tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
>         a2 = -a2 & ((8 << vece) - 1);
>         tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
>         tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
>         break;
OK. We will take this way.
Thanks,
Zhiwei
>
> r~