Add support for passing TLB_BSWAP flag from powerpc booke206 MMU
Fix instruction fetches from LE pages being treated as MMIO
This change should not affect SPARC, as its instruction fetches are always BE
Signed-off-by: Danila Zhebryakov <d.zhebryakov@yandex.ru>
---
accel/tcg/cputlb.c | 12 +++++-------
target/ppc/cpu.h | 4 ++++
target/ppc/mmu-booke.c | 5 +++++
target/ppc/mmu_helper.c | 17 +++++++++++++++--
target/ppc/translate.c | 29 ++++++++++++++++++++++++++++-
5 files changed, 57 insertions(+), 10 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 841b54e41d..396e510f1b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1401,7 +1401,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
- if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
+ if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY
+ | TLB_CHECK_ALIGNED | TLB_BSWAP))
|| (access_type != MMU_INST_FETCH && force_mmio)) {
*phost = NULL;
return TLB_MMIO;
@@ -1792,12 +1793,9 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
}
- /*
- * Since target/sparc is the only user of TLB_BSWAP, and all
- * Sparc accesses are aligned, any treatment across two pages
- * would be arbitrary. Refuse it until there's a use.
- */
- tcg_debug_assert((flags & TLB_BSWAP) == 0);
+ if (unlikely(flags & TLB_BSWAP)) {
+ l->memop ^= MO_BSWAP;
+ }
}
return crosspage;
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 6b90543811..127b05c865 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1186,6 +1186,10 @@ struct ppc_radix_page_info {
uint32_t entries[PPC_PAGE_SIZES_MAX_SZ];
};
+/*****************************************************************************/
+/* PowerPC usage of the PAGE_TARGET_1 bit for TLB little-endian bit */
+#define PAGE_LE PAGE_TARGET_1
+
/*****************************************************************************/
/* Dynamic Execution Control Register */
diff --git a/target/ppc/mmu-booke.c b/target/ppc/mmu-booke.c
index 55e5dd7c6b..dc72bbf21f 100644
--- a/target/ppc/mmu-booke.c
+++ b/target/ppc/mmu-booke.c
@@ -357,6 +357,11 @@ found_tlb:
}
*prot = 0;
+
+ if (tlb->mas2 & MAS2_E) {
+ *prot |= PAGE_LE;
+ }
+
if (pr) {
if (tlb->mas7_3 & MAS3_UR) {
*prot |= PAGE_READ;
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index ac60705402..20282edaaa 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -27,6 +27,7 @@
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
@@ -1368,8 +1369,20 @@ bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
if (ppc_xlate(cpu, eaddr, access_type, &raddr,
&page_size, &prot, mmu_idx, !probe)) {
- tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
- prot, mmu_idx, 1UL << page_size);
+ if (prot & PAGE_LE) {
+ CPUTLBEntryFull full = {
+ .phys_addr = raddr & TARGET_PAGE_MASK,
+ .attrs = MEMTXATTRS_UNSPECIFIED,
+ .prot = prot,
+ .lg_page_size = ctz64(1UL << page_size),
+ .tlb_fill_flags = TLB_BSWAP
+ };
+ tlb_set_page_full(cs, mmu_idx, eaddr & TARGET_PAGE_MASK, &full);
+
+ } else {
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ prot, mmu_idx, 1UL << page_size);
+ }
return true;
}
if (probe) {
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 27f90c3cc5..8cf50a0221 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -24,7 +24,9 @@
#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
+#include "accel/tcg/probe.h"
#include "qemu/host-utils.h"
+#include "exec/tlb-flags.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -219,6 +221,27 @@ static inline bool need_byteswap(const DisasContext *ctx)
#endif
}
+#ifndef CONFIG_USER_ONLY
+static bool is_page_little_endian(CPUPPCState *env, vaddr addr)
+{
+ CPUTLBEntryFull *full;
+ void *host;
+ int mmu_idx = ppc_env_mmu_index(env, true);
+ int flags;
+
+ flags = probe_access_full_mmu(env, addr, 0, MMU_INST_FETCH, mmu_idx,
+ &host, &full);
+ assert(!(flags & TLB_INVALID_MASK));
+
+ return full->tlb_fill_flags & TLB_BSWAP;
+}
+#else
+static bool is_page_little_endian(CPUPPCState *env, vaddr addr)
+{
+ return false;
+}
+#endif
+
/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C) (!(C)->sf_mode)
@@ -6577,7 +6600,11 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
ctx->cia = pc = ctx->base.pc_next;
- insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
+ bool tlb_is_le = is_page_little_endian(env, ctx->base.pc_next);
+
+ insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx)
+ || tlb_is_le);
ctx->base.pc_next = pc += 4;
if (!is_prefix_insn(ctx, insn)) {
--
2.47.2
On 8/21/25 21:33, Danila Zhebryakov wrote:
> Add support for passing TLB_BSWAP flag from powerpc booke206 MMU
> Fix instruction fetches from LE pages being treated as MMIO
> This change should not affect SPARC, as its instruction fetches are always BE
>
> Signed-off-by: Danila Zhebryakov <d.zhebryakov@yandex.ru>
> ---
> accel/tcg/cputlb.c | 12 +++++-------
> target/ppc/cpu.h | 4 ++++
> target/ppc/mmu-booke.c | 5 +++++
> target/ppc/mmu_helper.c | 17 +++++++++++++++--
> target/ppc/translate.c | 29 ++++++++++++++++++++++++++++-
> 5 files changed, 57 insertions(+), 10 deletions(-)
>
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 841b54e41d..396e510f1b 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1401,7 +1401,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
> flags |= full->slow_flags[access_type];
>
> /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
> - if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
> + if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY
> + | TLB_CHECK_ALIGNED | TLB_BSWAP))
> || (access_type != MMU_INST_FETCH && force_mmio)) {
> *phost = NULL;
> return TLB_MMIO;
> @@ -1792,12 +1793,9 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
> mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
> }
>
> - /*
> - * Since target/sparc is the only user of TLB_BSWAP, and all
> - * Sparc accesses are aligned, any treatment across two pages
> - * would be arbitrary. Refuse it until there's a use.
> - */
> - tcg_debug_assert((flags & TLB_BSWAP) == 0);
> + if (unlikely(flags & TLB_BSWAP)) {
> + l->memop ^= MO_BSWAP;
> + }
You should replace the comment, noting that E500 detects endianness on the lowest memory
address.
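Perhaps something like this (untested sketch; assumes the first page's
flag should decide a crossing access, per that e500 rule):

    /*
     * TLB_BSWAP means the TLB entry, not the MemOp, selects the byte
     * order.  e500 defines the endianness of an access that crosses a
     * page boundary by the entry mapping the lowest address, so apply
     * the first page's swap to the whole access.
     */
    if (unlikely(l->page[0].flags & TLB_BSWAP)) {
        l->memop ^= MO_BSWAP;
    }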
> +/*****************************************************************************/
> +/* PowerPC usage of the PAGE_TARGET_1 bit for TLB little-endian bit */
> +#define PAGE_LE PAGE_TARGET_1
PAGE_TARGET_1 is a user-only thing; it exists for the user-mode
page-flag tracking via page_get_flags(), and nothing on the softmmu
side interprets it, so it should not be overloaded as a softmmu prot bit.
> diff --git a/target/ppc/mmu-booke.c b/target/ppc/mmu-booke.c
> index 55e5dd7c6b..dc72bbf21f 100644
> --- a/target/ppc/mmu-booke.c
> +++ b/target/ppc/mmu-booke.c
> @@ -357,6 +357,11 @@ found_tlb:
> }
>
> *prot = 0;
> +
> + if (tlb->mas2 & MAS2_E) {
> + *prot |= PAGE_LE;
> + }
> +
> if (pr) {
> if (tlb->mas7_3 & MAS3_UR) {
> *prot |= PAGE_READ;
> diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
> index ac60705402..20282edaaa 100644
> --- a/target/ppc/mmu_helper.c
> +++ b/target/ppc/mmu_helper.c
> @@ -27,6 +27,7 @@
> #include "exec/cputlb.h"
> #include "exec/page-protection.h"
> #include "exec/target_page.h"
> +#include "exec/tlb-flags.h"
> #include "exec/log.h"
> #include "helper_regs.h"
> #include "qemu/error-report.h"
> @@ -1368,8 +1369,20 @@ bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
>
> if (ppc_xlate(cpu, eaddr, access_type, &raddr,
> &page_size, &prot, mmu_idx, !probe)) {
> - tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
> - prot, mmu_idx, 1UL << page_size);
> + if (prot & PAGE_LE) {
> + CPUTLBEntryFull full = {
> + .phys_addr = raddr & TARGET_PAGE_MASK,
> + .attrs = MEMTXATTRS_UNSPECIFIED,
> + .prot = prot,
> + .lg_page_size = ctz64(1UL << page_size),
> + .tlb_fill_flags = TLB_BSWAP
> + };
> + tlb_set_page_full(cs, mmu_idx, eaddr & TARGET_PAGE_MASK, &full);
> +
> + } else {
> + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
> + prot, mmu_idx, 1UL << page_size);
> + }
(0) ctz64(1 << x) == x.
(1) Ideally convert ppc to *always* use tlb_set_page_full.
(2) Pass the CPUTLBEntryFull struct down to the helpers to be
filled in and then you don't need to abuse PAGE_TARGET_1;
you can set .tlb_fill_flags directly for MAS2_E.
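I.e. something like this in ppc_cpu_tlb_fill (rough, untested sketch;
assumes ppc_xlate and the booke206 walker are converted to fill in a
CPUTLBEntryFull out-parameter):

    CPUTLBEntryFull full = {
        .attrs = MEMTXATTRS_UNSPECIFIED,
    };

    if (ppc_xlate(cpu, eaddr, access_type, &full, mmu_idx, !probe)) {
        /*
         * ppc_xlate filled in phys_addr, prot and lg_page_size
         * (as page_size directly, since ctz64(1UL << x) == x), and
         * the booke206 walker set full.tlb_fill_flags |= TLB_BSWAP
         * for MAS2_E instead of abusing a PAGE_* bit.
         */
        full.phys_addr &= TARGET_PAGE_MASK;
        tlb_set_page_full(cs, mmu_idx, eaddr & TARGET_PAGE_MASK, &full);
        return true;
    }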
> --- a/target/ppc/translate.c
> +++ b/target/ppc/translate.c
> @@ -24,7 +24,9 @@
> #include "exec/target_page.h"
> #include "tcg/tcg-op.h"
> #include "tcg/tcg-op-gvec.h"
> +#include "accel/tcg/probe.h"
> #include "qemu/host-utils.h"
> +#include "exec/tlb-flags.h"
>
> #include "exec/helper-proto.h"
> #include "exec/helper-gen.h"
> @@ -219,6 +221,27 @@ static inline bool need_byteswap(const DisasContext *ctx)
> #endif
> }
>
> +#ifndef CONFIG_USER_ONLY
> +static bool is_page_little_endian(CPUPPCState *env, vaddr addr)
> +{
> + CPUTLBEntryFull *full;
> + void *host;
> + int mmu_idx = ppc_env_mmu_index(env, true);
> + int flags;
> +
> + flags = probe_access_full_mmu(env, addr, 0, MMU_INST_FETCH, mmu_idx,
> + &host, &full);
> + assert(!(flags & TLB_INVALID_MASK));
> +
> + return full->tlb_fill_flags & TLB_BSWAP;
> +}
> +#else
> +static bool is_page_little_endian(CPUPPCState *env, vaddr addr)
> +{
> + return false;
> +}
> +#endif
> +
> /* True when active word size < size of target_long. */
> #ifdef TARGET_PPC64
> # define NARROW_MODE(C) (!(C)->sf_mode)
> @@ -6577,7 +6600,11 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
> ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
>
> ctx->cia = pc = ctx->base.pc_next;
> - insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
> + bool tlb_is_le = is_page_little_endian(env, ctx->base.pc_next);
> +
> + insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx)
> + || tlb_is_le);
You should probe the page once during ppc_tr_init_disas_context, not for every insn. You
can skip this probe unless E500.
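I.e. roughly ("le_page" is a made-up DisasContext field, untested):

    /* in ppc_tr_init_disas_context(), once per TB */
    ctx->le_page = false;
#ifndef CONFIG_USER_ONLY
    if (env->mmu_model == POWERPC_MMU_BOOKE206) {
        ctx->le_page = is_page_little_endian(env, ctx->base.pc_first);
    }
#endif

    /* in ppc_tr_translate_insn() */
    insn = translator_ldl_swap(env, dcbase, pc,
                               need_byteswap(ctx) || ctx->le_page);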
r~