Rename get_alignment_bits() to memop_alignment_bits(), adopting the
"memop_" prefix used by other functions that operate on MemOp.
Reviewed-by: Helge Deller <deller@gmx.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
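Usage sketch, not part of the patch: after the rename, an alignment
check reads as at the mmu_lookup() call site in accel/tcg/cputlb.c,
with the new name making the MemOp argument explicit:

    /* Extract the log2 alignment requirement encoded in the MemOp. */
    int a_bits = memop_alignment_bits(l->memop);
    if (addr & ((1 << a_bits) - 1)) {
        /* Low address bits set: raise the guest unaligned-access fault. */
        cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
    }
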
include/exec/memop.h | 4 ++--
accel/tcg/cputlb.c | 4 ++--
accel/tcg/user-exec.c | 4 ++--
target/arm/tcg/translate-a64.c | 4 ++--
target/xtensa/translate.c | 2 +-
tcg/tcg-op-ldst.c | 6 +++---
tcg/tcg.c | 2 +-
tcg/arm/tcg-target.c.inc | 4 ++--
tcg/sparc64/tcg-target.c.inc | 2 +-
9 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/exec/memop.h b/include/exec/memop.h
index 97720a8ee7..f53bf618c6 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -171,12 +171,12 @@ static inline bool memop_big_endian(MemOp op)
}
/**
- * get_alignment_bits
+ * memop_alignment_bits:
* @memop: MemOp value
*
* Extract the alignment size from the memop.
*/
-static inline unsigned get_alignment_bits(MemOp memop)
+static inline unsigned memop_alignment_bits(MemOp memop)
{
unsigned a = memop & MO_AMASK;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index fd6459b695..a975fe5f89 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1709,7 +1709,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
/* Handle CPU specific unaligned behaviour */
- a_bits = get_alignment_bits(l->memop);
+ a_bits = memop_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
@@ -1797,7 +1797,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 7ddc47b0ba..08a6df9987 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -959,7 +959,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
@@ -1241,7 +1241,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 071b6349fc..ec0b1ee252 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -294,7 +294,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
@@ -326,7 +326,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 75b7bfda4c..f4da4a40f9 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -521,7 +521,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
mop |= MO_ALIGN;
}
if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
- tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+ tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
}
return mop;
}
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 23dc807f11..a318011229 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -45,7 +45,7 @@ static void check_max_alignment(unsigned a_bits)
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
- unsigned a_bits = get_alignment_bits(op);
+ unsigned a_bits = memop_alignment_bits(op);
check_max_alignment(a_bits);
@@ -559,7 +559,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
/* In serial mode, reduce atomicity. */
@@ -676,7 +676,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
/* In serial mode, reduce atomicity. */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 34e3056380..5decd83cf4 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -5506,7 +5506,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
MemOp host_atom, bool allow_two_ops)
{
- MemOp align = get_alignment_bits(opc);
+ MemOp align = memop_alignment_bits(opc);
MemOp size = opc & MO_SIZE;
MemOp half = size ? size - 1 : 0;
MemOp atom = opc & MO_ATOM_MASK;
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 3de5f50b62..56072d89a2 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1587,7 +1587,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* LDRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
break;
@@ -1691,7 +1691,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* STRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
} else {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 176c98740b..32f9ec24b5 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1133,7 +1133,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* Otherwise, test for at least natural alignment and defer
* everything else to the helper functions.
*/
- if (s_bits != get_alignment_bits(opc)) {
+ if (s_bits != memop_alignment_bits(opc)) {
tcg_debug_assert(check_fit_tl(a_mask, 13));
tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
--
2.43.0