Following the pattern used for 'M' and Zmmul, check whether either the
'A' extension or the appropriate split extension for the instruction
(Zaamo for AMOs, Zalrsc for LR/SC) is enabled.
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
---
target/riscv/insn_trans/trans_rva.c.inc | 56 +++++++++++++++----------
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index f0368de3e4..267930e5bc 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -18,6 +18,18 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define REQUIRE_A_OR_ZAAMO(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_A_OR_ZALRSC(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \
+ return false; \
+ } \
+} while (0)
+
static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1;
@@ -96,143 +108,143 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZALRSC(ctx);
return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
}
static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZALRSC(ctx);
return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
}
static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
{
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZALRSC(ctx);
return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
}
static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZALRSC(ctx);
return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
REQUIRE_64BIT(ctx);
- REQUIRE_EXT(ctx, RVA);
+ REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
}
--
2.43.0
On 1/19/24 08:21, Rob Bradford wrote: > Following the pattern for 'M' and Zmmul check if either the 'A' > extension is enabled or the appropriate split extension for the > instruction. > > Signed-off-by: Rob Bradford <rbradford@rivosinc.com> > --- Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> > target/riscv/insn_trans/trans_rva.c.inc | 56 +++++++++++++++---------- > 1 file changed, 34 insertions(+), 22 deletions(-) > > diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc > index f0368de3e4..267930e5bc 100644 > --- a/target/riscv/insn_trans/trans_rva.c.inc > +++ b/target/riscv/insn_trans/trans_rva.c.inc > @@ -18,6 +18,18 @@ > * this program. If not, see <http://www.gnu.org/licenses/>. > */ > > +#define REQUIRE_A_OR_ZAAMO(ctx) do { \ > + if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \ > + return false; \ > + } \ > +} while (0) > + > +#define REQUIRE_A_OR_ZALRSC(ctx) do { \ > + if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \ > + return false; \ > + } \ > +} while (0) > + > static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) > { > TCGv src1; > @@ -96,143 +108,143 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, > > static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZALRSC(ctx); > return gen_lr(ctx, a, (MO_ALIGN | MO_TESL)); > } > > static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZALRSC(ctx); > return gen_sc(ctx, a, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); > } > > static bool 
trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) > { > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); > } > > static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZALRSC(ctx); > return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ); > } > > static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZALRSC(ctx); > return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return 
gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); > } > > static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) > { > REQUIRE_64BIT(ctx); > - REQUIRE_EXT(ctx, RVA); > + REQUIRE_A_OR_ZAAMO(ctx); > return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); > }
© 2016 - 2024 Red Hat, Inc.