This requires finishing the conversion to tcg_target_op_def.
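As a rough before/after sketch of the change (simplified, not the literal
hunks below), the per-opcode table of constraint strings becomes a switch
that returns a constraint-set index:

    /* Before: one table entry per opcode, with constraint strings. */
    static const TCGTargetOpDef tcg_target_op_defs[] = {
        { INDEX_op_add_i32, { "r", "ri", "ri" } },
        /* ... */
        { -1 },
    };

    /* After: a switch returning an index into the shared constraint sets. */
    static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
    {
        switch (op) {
        case INDEX_op_add_i32:
            return C_O1_I2(r, ri, ri);
        /* ... */
        default:
            g_assert_not_reached();
        }
    }

Since the sets are named, opcodes with identical constraints share a single
entry in tcg-target-con-set.h.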
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci/tcg-target-con-set.h | 25 ++++
tcg/tci/tcg-target.h | 2 +
tcg/tci/tcg-target.c.inc | 279 +++++++++++++++++------------------
3 files changed, 161 insertions(+), 145 deletions(-)
create mode 100644 tcg/tci/tcg-target-con-set.h
diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h
new file mode 100644
index 0000000000..38e82f7535
--- /dev/null
+++ b/tcg/tci/tcg-target-con-set.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * TCI target-specific constraint sets.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I2(r, r)
+C_O0_I2(r, ri)
+C_O0_I3(r, r, r)
+C_O0_I4(r, r, ri, ri)
+C_O0_I4(r, r, r, r)
+C_O1_I1(r, r)
+C_O1_I2(r, 0, r)
+C_O1_I2(r, ri, ri)
+C_O1_I2(r, r, r)
+C_O1_I2(r, r, ri)
+C_O1_I4(r, r, r, ri, ri)
+C_O2_I1(r, r, r)
+C_O2_I2(r, r, r, r)
+C_O2_I4(r, r, r, r, r, r)
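For readers unfamiliar with the scheme: the header above is an X-macro list.
Core TCG code defines the C_On_Im() macros and then includes the file, once
to generate an enum of constraint-set indices and again so that the backend's
tcg_target_op_def() below can return those enum names.  A minimal sketch of
the pattern, with illustrative macro bodies rather than the exact ones in
tcg/tcg.c:

    /* Sketch only: two arities, expanded by hand instead of via #include. */
    #define C_O0_I2(i1, i2)         C_O0_I2_##i1##_##i2,
    #define C_O1_I2(o1, i1, i2)     C_O1_I2_##o1##_##i1##_##i2,

    typedef enum {
        C_O0_I2(r, ri)              /* expands to C_O0_I2_r_ri,    */
        C_O1_I2(r, ri, ri)          /* expands to C_O1_I2_r_ri_ri, */
    } TCGConstraintSetIndex;

    #undef C_O0_I2
    #undef C_O1_I2

    /*
     * Redefined without the trailing comma, so that the backend can write
     * "return C_O1_I2(r, ri, ri);" and get the matching enum constant.
     */
    #define C_O1_I2(o1, i1, i2)     C_O1_I2_##o1##_##i1##_##i2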
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index bb784e018e..1efd8c4fb0 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -207,4 +207,6 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
/* no need to flush icache explicitly */
}

+#define TCG_TARGET_CON_SET_H
+
#endif /* TCG_TARGET_H */
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index 493bbf1e39..62bedaca28 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -37,154 +37,143 @@
/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
-/* Macros used in tcg_target_op_defs. */
-#define R "r"
-#define RI "ri"
-#if TCG_TARGET_REG_BITS == 32
-# define R64 "r", "r"
-#else
-# define R64 "r"
-#endif
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-# define L "r", "r"
-# define S "r", "r"
-#else
-# define L "r"
-# define S "r"
-#endif
-
-/* TODO: documentation. */
-static const TCGTargetOpDef tcg_target_op_defs[] = {
- { INDEX_op_exit_tb, { NULL } },
- { INDEX_op_goto_tb, { NULL } },
- { INDEX_op_br, { NULL } },
-
- { INDEX_op_ld8u_i32, { R, R } },
- { INDEX_op_ld8s_i32, { R, R } },
- { INDEX_op_ld16u_i32, { R, R } },
- { INDEX_op_ld16s_i32, { R, R } },
- { INDEX_op_ld_i32, { R, R } },
- { INDEX_op_st8_i32, { R, R } },
- { INDEX_op_st16_i32, { R, R } },
- { INDEX_op_st_i32, { R, R } },
-
- { INDEX_op_add_i32, { R, RI, RI } },
- { INDEX_op_sub_i32, { R, RI, RI } },
- { INDEX_op_mul_i32, { R, RI, RI } },
- { INDEX_op_div_i32, { R, R, R } },
- { INDEX_op_divu_i32, { R, R, R } },
- { INDEX_op_rem_i32, { R, R, R } },
- { INDEX_op_remu_i32, { R, R, R } },
- /* TODO: Does R, RI, RI result in faster code than R, R, RI?
- If both operands are constants, we can optimize. */
- { INDEX_op_and_i32, { R, RI, RI } },
- { INDEX_op_andc_i32, { R, RI, RI } },
- { INDEX_op_eqv_i32, { R, RI, RI } },
- { INDEX_op_nand_i32, { R, RI, RI } },
- { INDEX_op_nor_i32, { R, RI, RI } },
- { INDEX_op_or_i32, { R, RI, RI } },
- { INDEX_op_orc_i32, { R, RI, RI } },
- { INDEX_op_xor_i32, { R, RI, RI } },
- { INDEX_op_shl_i32, { R, RI, RI } },
- { INDEX_op_shr_i32, { R, RI, RI } },
- { INDEX_op_sar_i32, { R, RI, RI } },
- { INDEX_op_rotl_i32, { R, RI, RI } },
- { INDEX_op_rotr_i32, { R, RI, RI } },
- { INDEX_op_deposit_i32, { R, "0", R } },
-
- { INDEX_op_brcond_i32, { R, RI } },
-
- { INDEX_op_setcond_i32, { R, R, RI } },
- { INDEX_op_setcond_i64, { R, R, RI } },
-
- /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
- { INDEX_op_add2_i32, { R, R, R, R, R, R } },
- { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
- { INDEX_op_brcond2_i32, { R, R, RI, RI } },
- { INDEX_op_mulu2_i32, { R, R, R, R } },
- { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
-
- { INDEX_op_not_i32, { R, R } },
- { INDEX_op_neg_i32, { R, R } },
-
- { INDEX_op_ld8u_i64, { R, R } },
- { INDEX_op_ld8s_i64, { R, R } },
- { INDEX_op_ld16u_i64, { R, R } },
- { INDEX_op_ld16s_i64, { R, R } },
- { INDEX_op_ld32u_i64, { R, R } },
- { INDEX_op_ld32s_i64, { R, R } },
- { INDEX_op_ld_i64, { R, R } },
-
- { INDEX_op_st8_i64, { R, R } },
- { INDEX_op_st16_i64, { R, R } },
- { INDEX_op_st32_i64, { R, R } },
- { INDEX_op_st_i64, { R, R } },
-
- { INDEX_op_add_i64, { R, RI, RI } },
- { INDEX_op_sub_i64, { R, RI, RI } },
- { INDEX_op_mul_i64, { R, RI, RI } },
- { INDEX_op_div_i64, { R, R, R } },
- { INDEX_op_divu_i64, { R, R, R } },
- { INDEX_op_rem_i64, { R, R, R } },
- { INDEX_op_remu_i64, { R, R, R } },
- { INDEX_op_and_i64, { R, RI, RI } },
- { INDEX_op_andc_i64, { R, RI, RI } },
- { INDEX_op_eqv_i64, { R, RI, RI } },
- { INDEX_op_nand_i64, { R, RI, RI } },
- { INDEX_op_nor_i64, { R, RI, RI } },
- { INDEX_op_or_i64, { R, RI, RI } },
- { INDEX_op_orc_i64, { R, RI, RI } },
- { INDEX_op_xor_i64, { R, RI, RI } },
- { INDEX_op_shl_i64, { R, RI, RI } },
- { INDEX_op_shr_i64, { R, RI, RI } },
- { INDEX_op_sar_i64, { R, RI, RI } },
- { INDEX_op_rotl_i64, { R, RI, RI } },
- { INDEX_op_rotr_i64, { R, RI, RI } },
- { INDEX_op_deposit_i64, { R, "0", R } },
- { INDEX_op_brcond_i64, { R, RI } },
-
- { INDEX_op_ext8s_i64, { R, R } },
- { INDEX_op_ext16s_i64, { R, R } },
- { INDEX_op_ext32s_i64, { R, R } },
- { INDEX_op_ext8u_i64, { R, R } },
- { INDEX_op_ext16u_i64, { R, R } },
- { INDEX_op_ext32u_i64, { R, R } },
- { INDEX_op_ext_i32_i64, { R, R } },
- { INDEX_op_extu_i32_i64, { R, R } },
- { INDEX_op_bswap16_i64, { R, R } },
- { INDEX_op_bswap32_i64, { R, R } },
- { INDEX_op_bswap64_i64, { R, R } },
- { INDEX_op_not_i64, { R, R } },
- { INDEX_op_neg_i64, { R, R } },
-
- { INDEX_op_qemu_ld_i32, { R, L } },
- { INDEX_op_qemu_ld_i64, { R64, L } },
-
- { INDEX_op_qemu_st_i32, { R, S } },
- { INDEX_op_qemu_st_i64, { R64, S } },
-
- { INDEX_op_ext8s_i32, { R, R } },
- { INDEX_op_ext16s_i32, { R, R } },
- { INDEX_op_ext8u_i32, { R, R } },
- { INDEX_op_ext16u_i32, { R, R } },
-
- { INDEX_op_bswap16_i32, { R, R } },
- { INDEX_op_bswap32_i32, { R, R } },
-
- { INDEX_op_mb, { } },
- { -1 },
-};
-
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
- int i, n = ARRAY_SIZE(tcg_target_op_defs);
+ switch (op) {
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ return C_O1_I1(r, r);

- for (i = 0; i < n; ++i) {
- if (tcg_target_op_defs[i].op == op) {
- return &tcg_target_op_defs[i];
- }
+ case INDEX_op_st8_i32:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i64:
+ return C_O0_I2(r, r);
+
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ return C_O1_I2(r, r, r);
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
+ case INDEX_op_nand_i32:
+ case INDEX_op_nand_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ /* TODO: Does R, RI, RI result in faster code than R, R, RI? */
+ return C_O1_I2(r, ri, ri);
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ return C_O1_I2(r, 0, r);
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return C_O0_I2(r, ri);
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ return C_O1_I2(r, r, ri);
+
+#if TCG_TARGET_REG_BITS == 32
+ /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
+ case INDEX_op_add2_i32:
+ case INDEX_op_sub2_i32:
+ return C_O2_I4(r, r, r, r, r, r);
+ case INDEX_op_brcond2_i32:
+ return C_O0_I4(r, r, ri, ri);
+ case INDEX_op_mulu2_i32:
+ return C_O2_I2(r, r, r, r);
+ case INDEX_op_setcond2_i32:
+ return C_O1_I4(r, r, r, ri, ri);
+#endif
+
+ case INDEX_op_qemu_ld_i32:
+ return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+ ? C_O1_I1(r, r)
+ : C_O1_I2(r, r, r));
+ case INDEX_op_qemu_ld_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r)
+ : C_O2_I2(r, r, r, r));
+ case INDEX_op_qemu_st_i32:
+ return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+ ? C_O0_I2(r, r)
+ : C_O0_I3(r, r, r));
+ case INDEX_op_qemu_st_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r)
+ : C_O0_I4(r, r, r, r));
+
+ default:
+ g_assert_not_reached();
}
- return NULL;
}

static const int tcg_target_reg_alloc_order[] = {
--
2.25.1