Avoid the use of the OptContext z_mask and s_mask slots; compute the
masks locally and pass them directly to fold_masks_zs().
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
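A note for reviewers, not part of the commit message: a minimal sketch of
the mask constants involved, assuming the sign-bit-inclusive s_mask
representation adopted earlier in this series (MAKE_64BIT_MASK is QEMU's
existing bit-range helper; the values below are illustration only):

    /* ld8s, old encoding: sign repetitions above the sign bit */
    MAKE_64BIT_MASK(8, 56)    /* == 0xffffffffffffff00 */

    /* ld8s, new encoding: a sign-extended constant that also
       covers the sign bit itself */
    (uint64_t)INT8_MIN        /* == 0xffffffffffffff80 */

    /* ld8u: only the low byte can be nonzero, unchanged */
    MAKE_64BIT_MASK(0, 8)     /* == 0x00000000000000ff */

The locals start as z_mask = -1 (no bits known zero) and s_mask = 0 (no
sign-bit knowledge), so each case leaves the mask it does not set in a
neutral state for fold_masks_zs().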
 tcg/optimize.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 14d3d6253d..cd12985537 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2633,30 +2633,32 @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
 
 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask = -1, s_mask = 0;
+
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
     CASE_OP_32_64(ld8s):
-        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        s_mask = INT8_MIN;
         break;
     CASE_OP_32_64(ld8u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        z_mask = MAKE_64BIT_MASK(0, 8);
         break;
     CASE_OP_32_64(ld16s):
-        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+        s_mask = INT16_MIN;
         break;
     CASE_OP_32_64(ld16u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        z_mask = MAKE_64BIT_MASK(0, 16);
         break;
     case INDEX_op_ld32s_i64:
-        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+        s_mask = INT32_MIN;
         break;
     case INDEX_op_ld32u_i64:
-        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        z_mask = MAKE_64BIT_MASK(0, 32);
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
--
2.43.0