Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op-ldst.c | 113 +++++++++++-----------------------------------
1 file changed, 27 insertions(+), 86 deletions(-)
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 7716c3ad7c..55bfbf3a20 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -106,24 +106,12 @@ static void gen_ldst2(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
- if (TCG_TARGET_REG_BITS == 32) {
- gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
- tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
- addr, oi);
- } else {
- gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
- }
+ gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
}
static void gen_st_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
- if (TCG_TARGET_REG_BITS == 32) {
- gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I64,
- tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
- addr, oi);
- } else {
- gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
- }
+ gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
}
static void tcg_gen_req_mo(TCGBar type)
@@ -143,7 +131,7 @@ static TCGTemp *tci_extend_addr(TCGTemp *addr)
* Compare to the extension performed by tcg_out_{ld,st}_helper_args
* for native code generation.
*/
- if (TCG_TARGET_REG_BITS == 64 && tcg_ctx->addr_type == TCG_TYPE_I32) {
+ if (tcg_ctx->addr_type == TCG_TYPE_I32) {
TCGv_i64 temp = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
return tcgv_i64_temp(temp);
@@ -356,16 +344,6 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
TCGv_i64 copy_addr;
TCGTemp *addr_new;
- if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
- tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
- if (memop & MO_SIGN) {
- tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
- } else {
- tcg_gen_movi_i32(TCGV_HIGH(val), 0);
- }
- return;
- }
-
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
orig_oi = oi = make_memop_idx(memop, idx);
@@ -421,11 +399,6 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
MemOpIdx orig_oi, oi;
TCGTemp *addr_new;
- if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
- tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
- return;
- }
-
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
orig_oi = oi = make_memop_idx(memop, idx);
@@ -577,7 +550,6 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
orig_oi = make_memop_idx(memop, idx);
-    /* TODO: For now, force 32-bit hosts to use the helper. */
-    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+    if (TCG_TARGET_HAS_qemu_ldst_i128) {
TCGv_i64 lo, hi;
bool need_bswap = false;
MemOpIdx oi = orig_oi;
@@ -691,7 +664,6 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
-    /* TODO: For now, force 32-bit hosts to use the helper. */
-    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+    if (TCG_TARGET_HAS_qemu_ldst_i128) {
TCGv_i64 lo, hi;
MemOpIdx oi = orig_oi;
bool need_bswap = false;
@@ -950,17 +923,6 @@ static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
{
TCGv_i64 t1, t2;
- if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
- tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
- TCGV_LOW(newv), idx, memop);
- if (memop & MO_SIGN) {
- tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
- } else {
- tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
- }
- return;
- }
-
t1 = tcg_temp_ebb_new_i64();
t2 = tcg_temp_ebb_new_i64();
@@ -1019,17 +981,6 @@ static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
* is removed.
*/
tcg_gen_movi_i64(retv, 0);
- return;
- }
-
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
- TCGV_LOW(newv), idx, memop);
- if (memop & MO_SIGN) {
- tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
- } else {
- tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
- }
} else {
TCGv_i32 c32 = tcg_temp_ebb_new_i32();
TCGv_i32 n32 = tcg_temp_ebb_new_i32();
@@ -1064,43 +1015,33 @@ static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
TCGv_i128 cmpv, TCGv_i128 newv,
TCGArg idx, MemOp memop)
{
- if (TCG_TARGET_REG_BITS == 32) {
- /* Inline expansion below is simply too large for 32-bit hosts. */
- MemOpIdx oi = make_memop_idx(memop, idx);
- TCGv_i64 a64 = maybe_extend_addr64(addr);
+ TCGv_i128 oldv = tcg_temp_ebb_new_i128();
+ TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+ TCGv_i64 z = tcg_constant_i64(0);
- gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
- tcg_constant_i32(oi));
- maybe_free_addr64(a64);
- } else {
- TCGv_i128 oldv = tcg_temp_ebb_new_i128();
- TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 z = tcg_constant_i64(0);
+ tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);
- tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);
+ /* Compare i128 */
+ tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
+ tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
+ tcg_gen_or_i64(t0, t0, t1);
- /* Compare i128 */
- tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
- tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
- tcg_gen_or_i64(t0, t0, t1);
+ /* tmpv = equal ? newv : oldv */
+ tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
+ TCGV128_LOW(newv), TCGV128_LOW(oldv));
+ tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
+ TCGV128_HIGH(newv), TCGV128_HIGH(oldv));
- /* tmpv = equal ? newv : oldv */
- tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
- TCGV128_LOW(newv), TCGV128_LOW(oldv));
- tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
- TCGV128_HIGH(newv), TCGV128_HIGH(oldv));
+ /* Unconditional writeback. */
+ tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
+ tcg_gen_mov_i128(retv, oldv);
- /* Unconditional writeback. */
- tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
- tcg_gen_mov_i128(retv, oldv);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i128(tmpv);
- tcg_temp_free_i128(oldv);
- }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i128(tmpv);
+ tcg_temp_free_i128(oldv);
}
void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
--
2.43.0