Replace the fixed-width tcg_gen_gvec_dup{8,32,64}i calls with
tcg_gen_gvec_dup_imm.  In a few cases this also lets us remove manual
replication of the constant via dup_const().
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 10 +++++-----
target/arm/translate-sve.c | 12 +++++-------
target/arm/translate.c | 9 ++++++---
3 files changed, 16 insertions(+), 15 deletions(-)
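
Illustration (editorial note, not part of the patch; trans_DUP_i from
translate-sve.c is used as the example): the "manual replication" being
removed is the caller pre-widening a narrow immediate with dup_const()
so it can be handed to the fixed 64-bit helper.

    /* before: the caller replicates the immediate by hand */
    tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(a->esz, a->imm));

    /* after: tcg_gen_gvec_dup_imm() takes the element size (vece)
     * and performs the replication internally */
    tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
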
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 7580e46367..095638e09a 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -519,7 +519,7 @@ static void clear_vec_high(DisasContext *s, bool is_q, int rd)
tcg_temp_free_i64(tcg_zero);
}
if (vsz > 16) {
- tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
+ tcg_gen_gvec_dup_imm(MO_64, ofs + 16, vsz - 16, vsz - 16, 0);
}
}
@@ -7794,8 +7794,8 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
/* MOVI or MVNI, with MVNI negation handled above. */
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
- vec_full_reg_size(s), imm);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
+ vec_full_reg_size(s), imm);
} else {
/* ORR or BIC, with BIC negation to AND handled above. */
if (is_neg) {
@@ -10223,8 +10223,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
if (is_u) {
if (shift == 8 << size) {
/* Shift count the same size as element size produces zero. */
- tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s), 0);
+ tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s), 0);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index b35bad245e..6c8bda4e4c 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -177,7 +177,7 @@ static bool do_mov_z(DisasContext *s, int rd, int rn)
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
unsigned vsz = vec_full_reg_size(s);
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), vsz, vsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}
/* Invoke a vector expander on two Pregs. */
@@ -1453,7 +1453,7 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
unsigned oprsz = size_for_gvec(setsz / 8);
if (oprsz * 8 == setsz) {
- tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
goto done;
}
}
@@ -2044,7 +2044,7 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
} else {
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, 0);
+ tcg_gen_gvec_dup_imm(esz, dofs, vsz, vsz, 0);
}
}
return true;
@@ -3260,9 +3260,7 @@ static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
/* Decode the VFP immediate. */
imm = vfp_expand_imm(a->esz, a->imm);
- imm = dup_const(a->esz, imm);
-
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, imm);
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
}
return true;
}
@@ -3276,7 +3274,7 @@ static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
unsigned vsz = vec_full_reg_size(s);
int dofs = vec_full_reg_offset(s, a->rd);
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(a->esz, a->imm));
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
}
return true;
}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 9f9f4e19e0..af4d3ff4c9 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5386,7 +5386,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
MIN(shift, (8 << size) - 1),
vec_size, vec_size);
} else if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
+ vec_size, 0);
} else {
tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5437,7 +5438,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* architecturally valid and results in zero.
*/
if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(size, rd_ofs,
+ vec_size, vec_size, 0);
} else {
tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5783,7 +5785,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i64(t64);
} else {
- tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
+ tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
+ vec_size, imm);
}
}
}
--
2.20.1
Richard Henderson <richard.henderson@linaro.org> writes:
> Replace the fixed-width tcg_gen_gvec_dup{8,32,64}i calls with
> tcg_gen_gvec_dup_imm.  In a few cases this also lets us remove manual
> replication of the constant via dup_const().
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée