Cmpsel operations whose third or fourth operand is the constant 0 can be
simplified to and/andc, avoiding the load of zero into a vector register.
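
For example (a rough per-lane model in C, not part of the patch; the
helper name and the 64-bit lane width are only illustrative), the blend
that vpblendvb computes degenerates to a single and/andc once one of
the data inputs is zero:

    #include <stdint.h>

    /* mask is all-ones or all-zeros per lane, as produced by the compare. */
    static uint64_t cmpsel_lane(uint64_t mask, uint64_t v3, uint64_t v4)
    {
        uint64_t r = (mask & v3) | (~mask & v4);   /* what vpblendvb does */
        /* v4 == 0:  r == (mask & v3)   -> pand
         * v3 == 0:  r == (~mask & v4)  -> pandn */
        return r;
    }
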
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target-con-set.h | 2 +-
tcg/i386/tcg-target-con-str.h | 1 +
tcg/i386/tcg-target.c.inc | 26 +++++++++++++++++++++++---
3 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
index da4411d96b..a9ff245c42 100644
--- a/tcg/i386/tcg-target-con-set.h
+++ b/tcg/i386/tcg-target-con-set.h
@@ -50,7 +50,7 @@ C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
C_O1_I3(x, 0, x, x)
C_O1_I3(x, x, x, x)
-C_O1_I4(x, x, x, x, x)
+C_O1_I4(x, x, x, xO, xO)
C_O1_I4(r, r, reT, r, 0)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, L)
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
index cc22db227b..52142ab121 100644
--- a/tcg/i386/tcg-target-con-str.h
+++ b/tcg/i386/tcg-target-con-str.h
@@ -28,6 +28,7 @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
*/
CONST('e', TCG_CT_CONST_S32)
CONST('I', TCG_CT_CONST_I32)
+CONST('O', TCG_CT_CONST_ZERO)
CONST('T', TCG_CT_CONST_TST)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_U32)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index a04dc7d270..c63c3faed8 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -133,6 +133,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800
#define TCG_CT_CONST_TST 0x1000
+#define TCG_CT_CONST_ZERO 0x2000
/* Registers used with L constraint, which are the first argument
registers on x86_64, and two random call clobbered registers on
@@ -226,6 +227,9 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return 1;
}
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return 1;
+ }
return 0;
}
@@ -3119,13 +3123,29 @@ static void tcg_out_cmpsel_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg v0, TCGReg c1, TCGReg c2,
TCGReg v3, TCGReg v4, TCGCond cond)
{
+ /*
+ * Since XMM0 is 16, the only way we get 0 into V3 and V4
+ * is via the constant zero constraint.
+ */
+ if (!v3 && !v4) {
+ tcg_out_dupi_vec(s, type, vece, v0, 0);
+ return;
+ }
+
if (tcg_out_cmp_vec_noinv(s, type, vece, TCG_TMP_VEC, c1, c2, cond)) {
TCGReg swap = v3;
v3 = v4;
v4 = swap;
}
- tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, v0, v4, v3, type);
- tcg_out8(s, (TCG_TMP_VEC - TCG_REG_XMM0) << 4);
+
+ if (!v3) {
+ tcg_out_vex_modrm_type(s, OPC_PANDN, v0, TCG_TMP_VEC, v4, type);
+ } else if (!v4) {
+ tcg_out_vex_modrm_type(s, OPC_PAND, v0, TCG_TMP_VEC, v3, type);
+ } else {
+ tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, v0, v4, v3, type);
+ tcg_out8(s, (TCG_TMP_VEC - TCG_REG_XMM0) << 4);
+ }
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -3716,7 +3736,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_bitsel_vec:
return C_O1_I3(x, x, x, x);
case INDEX_op_cmpsel_vec:
- return C_O1_I4(x, x, x, x, x);
+ return C_O1_I4(x, x, x, xO, xO);
default:
g_assert_not_reached();
--
2.43.0