From: Brian Cain <bcain@quicinc.com>
Co-authored-by: Taylor Simpson <ltaylorsimpson@gmail.com>
Co-authored-by: Michael Lambert <mlambert@quicinc.com>
Co-authored-by: Sid Manning <sidneym@quicinc.com>
Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
Signed-off-by: Brian Cain <brian.cain@oss.qualcomm.com>
---
target/hexagon/cpu-param.h | 4 +
target/hexagon/cpu.h | 13 +
target/hexagon/hex_mmu.h | 30 +++
target/hexagon/internal.h | 3 +
target/hexagon/cpu.c | 27 +-
target/hexagon/hex_mmu.c | 528 +++++++++++++++++++++++++++++++++++++
target/hexagon/machine.c | 30 +++
target/hexagon/translate.c | 2 +-
target/hexagon/meson.build | 3 +-
9 files changed, 637 insertions(+), 3 deletions(-)
create mode 100644 target/hexagon/hex_mmu.h
create mode 100644 target/hexagon/hex_mmu.c
diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h
index 45ee7b4640..fdc0a26b94 100644
--- a/target/hexagon/cpu-param.h
+++ b/target/hexagon/cpu-param.h
@@ -18,7 +18,11 @@
#ifndef HEXAGON_CPU_PARAM_H
#define HEXAGON_CPU_PARAM_H
+#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_BITS 16 /* 64K pages */
+#else
+#define TARGET_PAGE_BITS 12 /* 4K pages */
+#endif
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index b0ccaf36f9..04debda8c2 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -31,6 +31,8 @@
#include "mmvec/mmvec.h"
#include "hw/registerfields.h"
+typedef struct CPUHexagonTLBContext CPUHexagonTLBContext;
+
#define NUM_PREGS 4
#define TOTAL_PER_THREAD_REGS 64
@@ -126,6 +128,7 @@ typedef struct CPUArchState {
target_ulong tlb_lock_count;
target_ulong k0_lock_count;
target_ulong next_PC;
+ CPUHexagonTLBContext *hex_tlb;
#endif
target_ulong new_value_usr;
@@ -172,12 +175,15 @@ struct ArchCPU {
bool lldb_compat;
target_ulong lldb_stack_adjust;
bool short_circuit;
+#ifndef CONFIG_USER_ONLY
uint32_t num_tlbs;
+#endif
};
#include "cpu_bits.h"
FIELD(TB_FLAGS, IS_TIGHT_LOOP, 0, 1)
+FIELD(TB_FLAGS, MMU_INDEX, 1, 3)
G_NORETURN void hexagon_raise_exception_err(CPUHexagonState *env,
uint32_t exception,
@@ -189,6 +195,7 @@ uint32_t hexagon_sreg_read(CPUHexagonState *env, uint32_t reg);
void hexagon_gdb_sreg_write(CPUHexagonState *env, uint32_t reg, uint32_t val);
#endif
+#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags)
{
@@ -202,6 +209,12 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
if (*pc & PCALIGN_MASK) {
hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
}
+#ifndef CONFIG_USER_ONLY
+ hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, MMU_INDEX,
+ cpu_mmu_index(env_cpu(env), false));
+#else
+ hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, MMU_INDEX, MMU_USER_IDX);
+#endif
}
typedef HexagonCPU ArchCPU;
diff --git a/target/hexagon/hex_mmu.h b/target/hexagon/hex_mmu.h
new file mode 100644
index 0000000000..fae8aefcac
--- /dev/null
+++ b/target/hexagon/hex_mmu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright(c) 2019-2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HEXAGON_MMU_H
+#define HEXAGON_MMU_H
+
+#include "max.h"
+
+struct CPUHexagonTLBContext {
+ uint64_t entries[MAX_TLB_ENTRIES];
+};
+
+extern void hex_tlbw(CPUHexagonState *env, uint32_t index, uint64_t value);
+extern uint32_t hex_tlb_lookup(CPUHexagonState *env, uint32_t ssr, uint32_t VA);
+extern void hex_mmu_realize(CPUHexagonState *env);
+extern void hex_mmu_on(CPUHexagonState *env);
+extern void hex_mmu_off(CPUHexagonState *env);
+extern void hex_mmu_mode_change(CPUHexagonState *env);
+extern bool hex_tlb_find_match(CPUHexagonState *env, target_ulong VA,
+ MMUAccessType access_type, hwaddr *PA, int *prot,
+ int *size, int32_t *excp, int mmu_idx);
+extern int hex_tlb_check_overlap(CPUHexagonState *env, uint64_t entry,
+ uint64_t index);
+extern void hex_tlb_lock(CPUHexagonState *env);
+extern void hex_tlb_unlock(CPUHexagonState *env);
+void dump_mmu(CPUHexagonState *env);
+#endif
diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h
index c24c360921..120cfde7b9 100644
--- a/target/hexagon/internal.h
+++ b/target/hexagon/internal.h
@@ -40,6 +40,9 @@ void G_NORETURN do_raise_exception(CPUHexagonState *env,
target_ulong PC,
uintptr_t retaddr);
+#define hexagon_cpu_mmu_enabled(env) \
+ GET_SYSCFG_FIELD(SYSCFG_MMUEN, arch_get_system_reg(env, HEX_SREG_SYSCFG))
+
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_hexagon_cpu;
#endif
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index 34c39cecd9..7ff678195d 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -28,6 +28,7 @@
#include "exec/gdbstub.h"
#include "cpu_helper.h"
#include "max.h"
+#include "hex_mmu.h"
#ifndef CONFIG_USER_ONLY
#include "sys_macros.h"
@@ -283,6 +284,18 @@ static void hexagon_restore_state_to_opc(CPUState *cs,
cpu_env(cs)->gpr[HEX_REG_PC] = data[0];
}
+
+#ifndef CONFIG_USER_ONLY
+static void mmu_reset(CPUHexagonState *env)
+{
+ CPUState *cs = env_cpu(env);
+ if (cs->cpu_index == 0) {
+ memset(env->hex_tlb, 0, sizeof(*env->hex_tlb));
+ }
+}
+#endif
+
+
static void hexagon_cpu_reset_hold(Object *obj, ResetType type)
{
CPUState *cs = CPU(obj);
@@ -310,6 +323,7 @@ static void hexagon_cpu_reset_hold(Object *obj, ResetType type)
if (cs->cpu_index == 0) {
arch_set_system_reg(env, HEX_SREG_MODECTL, 0x1);
}
+ mmu_reset(env);
arch_set_system_reg(env, HEX_SREG_HTID, cs->cpu_index);
memset(env->t_sreg, 0, sizeof(target_ulong) * NUM_SREGS);
memset(env->greg, 0, sizeof(target_ulong) * NUM_GREGS);
@@ -341,6 +355,14 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
return;
}
+#ifndef CONFIG_USER_ONLY
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ if (cpu->num_tlbs > MAX_TLB_ENTRIES) {
+ error_setg(errp, "Number of TLBs selected is invalid");
+ return;
+ }
+#endif
+
gdb_register_coprocessor(cs, hexagon_hvx_gdb_read_register,
hexagon_hvx_gdb_write_register,
gdb_find_static_feature("hexagon-hvx.xml"), 0);
@@ -352,9 +374,12 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
#endif
qemu_init_vcpu(cs);
+#ifndef CONFIG_USER_ONLY
+ CPUHexagonState *env = cpu_env(cs);
+ hex_mmu_realize(env);
+#endif
cpu_reset(cs);
#ifndef CONFIG_USER_ONLY
- CPUHexagonState *env = cpu_env(cs);
if (cs->cpu_index == 0) {
env->g_sreg = g_new0(target_ulong, NUM_SREGS);
} else {
diff --git a/target/hexagon/hex_mmu.c b/target/hexagon/hex_mmu.c
new file mode 100644
index 0000000000..54c4ba2dbf
--- /dev/null
+++ b/target/hexagon/hex_mmu.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright(c) 2019-2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "system/cpus.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "hex_mmu.h"
+#include "macros.h"
+#include "sys_macros.h"
+#include "reg_fields.h"
+
+#define GET_TLB_FIELD(ENTRY, FIELD) \
+ ((uint64_t)fEXTRACTU_BITS(ENTRY, reg_field_info[FIELD].width, \
+ reg_field_info[FIELD].offset))
+
+/* PPD (physical page descriptor) */
+static inline uint64_t GET_PPD(uint64_t entry)
+{
+ return GET_TLB_FIELD(entry, PTE_PPD) |
+ (GET_TLB_FIELD(entry, PTE_PA35) << reg_field_info[PTE_PPD].width);
+}
+
+#define NO_ASID (1 << 8)
+
+typedef enum {
+ PGSIZE_4K,
+ PGSIZE_16K,
+ PGSIZE_64K,
+ PGSIZE_256K,
+ PGSIZE_1M,
+ PGSIZE_4M,
+ PGSIZE_16M,
+ PGSIZE_64M,
+ PGSIZE_256M,
+ PGSIZE_1G,
+ NUM_PGSIZE_TYPES
+} tlb_pgsize_t;
+
+static const char *pgsize_str[NUM_PGSIZE_TYPES] = {
+ "4K",
+ "16K",
+ "64K",
+ "256K",
+ "1M",
+ "4M",
+ "16M",
+ "64M",
+ "256M",
+ "1G",
+};
+
+#define INVALID_MASK 0xffffffffLL
+
+static const uint64_t encmask_2_mask[] = {
+ 0x0fffLL, /* 4k, 0000 */
+ 0x3fffLL, /* 16k, 0001 */
+ 0xffffLL, /* 64k, 0010 */
+ 0x3ffffLL, /* 256k, 0011 */
+ 0xfffffLL, /* 1m, 0100 */
+ 0x3fffffLL, /* 4m, 0101 */
+ 0xffffffLL, /* 16m, 0110 */
+ 0x3ffffffLL, /* 64m, 0111 */
+ 0xfffffffLL, /* 256m, 1000 */
+ 0x3fffffffLL, /* 1g, 1001 */
+ INVALID_MASK, /* RSVD, 1010 */
+};
+
+/*
+ * @return the page size type from @a entry.
+ */
+static inline tlb_pgsize_t hex_tlb_pgsize_type(uint64_t entry)
+{
+ if (entry == 0) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: Supplied TLB entry was 0!\n", __func__);
+ return 0;
+ }
+ tlb_pgsize_t size = ctz64(entry);
+ g_assert(size < NUM_PGSIZE_TYPES);
+ return size;
+}
+
+/*
+ * @return the page size of @a entry, in bytes.
+ */
+static inline uint64_t hex_tlb_page_size_bytes(uint64_t entry)
+{
+ return 1ull << (TARGET_PAGE_BITS + 2 * hex_tlb_pgsize_type(entry));
+}
+
+static inline uint64_t hex_tlb_phys_page_num(uint64_t entry)
+{
+ uint32_t ppd = GET_PPD(entry);
+ return ppd >> 1;
+}
+
+static inline uint64_t hex_tlb_phys_addr(uint64_t entry)
+{
+ uint64_t pagemask = encmask_2_mask[hex_tlb_pgsize_type(entry)];
+ uint64_t pagenum = hex_tlb_phys_page_num(entry);
+ uint64_t PA = (pagenum << TARGET_PAGE_BITS) & (~pagemask);
+ return PA;
+}
+
+static inline uint64_t hex_tlb_virt_addr(uint64_t entry)
+{
+ return (uint64_t)GET_TLB_FIELD(entry, PTE_VPN) << TARGET_PAGE_BITS;
+}
+
+static bool hex_dump_mmu_entry(FILE *f, uint64_t entry)
+{
+ if (GET_TLB_FIELD(entry, PTE_V)) {
+ fprintf(f, "0x%016" PRIx64 ": ", entry);
+ uint64_t PA = hex_tlb_phys_addr(entry);
+ uint64_t VA = hex_tlb_virt_addr(entry);
+ fprintf(f, "V:%" PRId64 " G:%" PRId64 " A1:%" PRId64 " A0:%" PRId64,
+ GET_TLB_FIELD(entry, PTE_V), GET_TLB_FIELD(entry, PTE_G),
+ GET_TLB_FIELD(entry, PTE_ATR1), GET_TLB_FIELD(entry, PTE_ATR0));
+ fprintf(f, " ASID:0x%02" PRIx64 " VA:0x%08" PRIx64,
+ GET_TLB_FIELD(entry, PTE_ASID), VA);
+ fprintf(f,
+ " X:%" PRId64 " W:%" PRId64 " R:%" PRId64 " U:%" PRId64
+ " C:%" PRId64,
+ GET_TLB_FIELD(entry, PTE_X), GET_TLB_FIELD(entry, PTE_W),
+ GET_TLB_FIELD(entry, PTE_R), GET_TLB_FIELD(entry, PTE_U),
+ GET_TLB_FIELD(entry, PTE_C));
+ fprintf(f, " PA:0x%09" PRIx64 " SZ:%s (0x%" PRIx64 ")", PA,
+ pgsize_str[hex_tlb_pgsize_type(entry)],
+ hex_tlb_page_size_bytes(entry));
+ fprintf(f, "\n");
+ return true;
+ }
+
+ /* Not valid */
+ return false;
+}
+
+void dump_mmu(CPUHexagonState *env)
+{
+ int i;
+
+ HexagonCPU *cpu = env_archcpu(env);
+ for (i = 0; i < cpu->num_tlbs; i++) {
+ uint64_t entry = env->hex_tlb->entries[i];
+ if (GET_TLB_FIELD(entry, PTE_V)) {
+ qemu_printf("0x%016" PRIx64 ": ", entry);
+ uint64_t PA = hex_tlb_phys_addr(entry);
+ uint64_t VA = hex_tlb_virt_addr(entry);
+ qemu_printf(
+ "V:%" PRId64 " G:%" PRId64 " A1:%" PRId64 " A0:%" PRId64,
+ GET_TLB_FIELD(entry, PTE_V), GET_TLB_FIELD(entry, PTE_G),
+ GET_TLB_FIELD(entry, PTE_ATR1), GET_TLB_FIELD(entry, PTE_ATR0));
+ qemu_printf(" ASID:0x%02" PRIx64 " VA:0x%08" PRIx64,
+ GET_TLB_FIELD(entry, PTE_ASID), VA);
+ qemu_printf(
+ " X:%" PRId64 " W:%" PRId64 " R:%" PRId64 " U:%" PRId64
+ " C:%" PRId64,
+ GET_TLB_FIELD(entry, PTE_X), GET_TLB_FIELD(entry, PTE_W),
+ GET_TLB_FIELD(entry, PTE_R), GET_TLB_FIELD(entry, PTE_U),
+ GET_TLB_FIELD(entry, PTE_C));
+ qemu_printf(" PA:0x%09" PRIx64 " SZ:%s (0x%" PRIx64 ")", PA,
+ pgsize_str[hex_tlb_pgsize_type(entry)],
+ hex_tlb_page_size_bytes(entry));
+ qemu_printf("\n");
+ }
+ }
+}
+
+static inline void hex_log_tlbw(uint32_t index, uint64_t entry)
+{
+ if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+ if (qemu_log_enabled()) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "tlbw[%03d]: ", index);
+ if (!hex_dump_mmu_entry(logfile, entry)) {
+ fprintf(logfile, "invalid\n");
+ }
+ qemu_log_unlock(logfile);
+ }
+ }
+ }
+}
+
+void hex_tlbw(CPUHexagonState *env, uint32_t index, uint64_t value)
+{
+ uint32_t myidx = fTLB_NONPOW2WRAP(fTLB_IDXMASK(index));
+ bool old_entry_valid = GET_TLB_FIELD(env->hex_tlb->entries[myidx], PTE_V);
+ if (old_entry_valid && hexagon_cpu_mmu_enabled(env)) {
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush(cs);
+ }
+ env->hex_tlb->entries[myidx] = (value);
+ hex_log_tlbw(myidx, value);
+}
+
+void hex_mmu_realize(CPUHexagonState *env)
+{
+ CPUState *cs = env_cpu(env);
+ if (cs->cpu_index == 0) {
+ env->hex_tlb = g_malloc0(sizeof(CPUHexagonTLBContext));
+ } else {
+ CPUState *cpu0_s = NULL;
+ CPUHexagonState *env0 = NULL;
+ CPU_FOREACH(cpu0_s) {
+ assert(cpu0_s->cpu_index == 0);
+ env0 = &(HEXAGON_CPU(cpu0_s)->env);
+ break;
+ }
+ env->hex_tlb = env0->hex_tlb;
+ }
+}
+
+void hex_mmu_on(CPUHexagonState *env)
+{
+ CPUState *cs = env_cpu(env);
+ qemu_log_mask(CPU_LOG_MMU, "Hexagon MMU turned on!\n");
+ tlb_flush(cs);
+}
+
+void hex_mmu_off(CPUHexagonState *env)
+{
+ CPUState *cs = env_cpu(env);
+ qemu_log_mask(CPU_LOG_MMU, "Hexagon MMU turned off!\n");
+ tlb_flush(cs);
+}
+
+void hex_mmu_mode_change(CPUHexagonState *env)
+{
+ qemu_log_mask(CPU_LOG_MMU, "Hexagon mode change!\n");
+ CPUState *cs = env_cpu(env);
+ tlb_flush(cs);
+}
+
+static inline bool hex_tlb_entry_match_noperm(uint64_t entry, uint32_t asid,
+ uint64_t VA)
+{
+ if (GET_TLB_FIELD(entry, PTE_V)) {
+ if (GET_TLB_FIELD(entry, PTE_G)) {
+ /* Global entry - ignore ASID */
+ } else if (asid != NO_ASID) {
+ uint32_t tlb_asid = GET_TLB_FIELD(entry, PTE_ASID);
+ if (tlb_asid != asid) {
+ return false;
+ }
+ }
+
+ uint64_t page_size = hex_tlb_page_size_bytes(entry);
+ uint64_t page_start =
+ ROUND_DOWN(hex_tlb_virt_addr(entry), page_size);
+ if (page_start <= VA && VA < page_start + page_size) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline void hex_tlb_entry_get_perm(CPUHexagonState *env, uint64_t entry,
+ MMUAccessType access_type,
+ int mmu_idx, int *prot,
+ int32_t *excp)
+{
+ g_assert_not_reached();
+}
+
+static inline bool hex_tlb_entry_match(CPUHexagonState *env, uint64_t entry,
+ uint8_t asid, target_ulong VA,
+ MMUAccessType access_type, hwaddr *PA,
+ int *prot, int *size, int32_t *excp,
+ int mmu_idx)
+{
+ if (hex_tlb_entry_match_noperm(entry, asid, VA)) {
+ hex_tlb_entry_get_perm(env, entry, access_type, mmu_idx, prot, excp);
+ *PA = hex_tlb_phys_addr(entry);
+ *size = hex_tlb_page_size_bytes(entry);
+ return true;
+ }
+ return false;
+}
+
+bool hex_tlb_find_match(CPUHexagonState *env, target_ulong VA,
+ MMUAccessType access_type, hwaddr *PA, int *prot,
+ int *size, int32_t *excp, int mmu_idx)
+{
+ *PA = 0;
+ *prot = 0;
+ *size = 0;
+ *excp = 0;
+ uint32_t ssr = arch_get_system_reg(env, HEX_SREG_SSR);
+ uint8_t asid = GET_SSR_FIELD(SSR_ASID, ssr);
+ int i;
+ HexagonCPU *cpu = env_archcpu(env);
+ for (i = 0; i < cpu->num_tlbs; i++) {
+ uint64_t entry = env->hex_tlb->entries[i];
+ if (hex_tlb_entry_match(env, entry, asid, VA, access_type, PA, prot,
+ size, excp, mmu_idx)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static uint32_t hex_tlb_lookup_by_asid(CPUHexagonState *env, uint32_t asid,
+ uint32_t VA)
+{
+ g_assert_not_reached();
+}
+
+/* Called from tlbp instruction */
+uint32_t hex_tlb_lookup(CPUHexagonState *env, uint32_t ssr, uint32_t VA)
+{
+ return hex_tlb_lookup_by_asid(env, GET_SSR_FIELD(SSR_ASID, ssr), VA);
+}
+
+static bool hex_tlb_is_match(CPUHexagonState *env,
+ uint64_t entry1, uint64_t entry2,
+ bool consider_gbit)
+{
+ bool valid1 = GET_TLB_FIELD(entry1, PTE_V);
+ bool valid2 = GET_TLB_FIELD(entry2, PTE_V);
+ uint64_t size1 = hex_tlb_page_size_bytes(entry1);
+ uint64_t vaddr1 = ROUND_DOWN(hex_tlb_virt_addr(entry1), size1);
+ uint64_t size2 = hex_tlb_page_size_bytes(entry2);
+ uint64_t vaddr2 = ROUND_DOWN(hex_tlb_virt_addr(entry2), size2);
+ int asid1 = GET_TLB_FIELD(entry1, PTE_ASID);
+ int asid2 = GET_TLB_FIELD(entry2, PTE_ASID);
+ bool gbit1 = GET_TLB_FIELD(entry1, PTE_G);
+ bool gbit2 = GET_TLB_FIELD(entry2, PTE_G);
+
+ if (!valid1 || !valid2) {
+ return false;
+ }
+
+ if (((vaddr1 <= vaddr2) && (vaddr2 < (vaddr1 + size1))) ||
+ ((vaddr2 <= vaddr1) && (vaddr1 < (vaddr2 + size2)))) {
+ if (asid1 == asid2) {
+ return true;
+ }
+ if ((consider_gbit && gbit1) || gbit2) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Return codes:
+ * 0 or positive index of match
+ * -1 multiple matches
+ * -2 no match
+ */
+int hex_tlb_check_overlap(CPUHexagonState *env, uint64_t entry, uint64_t index)
+{
+ int matches = 0;
+ int last_match = 0;
+ int i;
+
+ HexagonCPU *cpu = env_archcpu(env);
+ for (i = 0; i < cpu->num_tlbs; i++) {
+ if (hex_tlb_is_match(env, entry, env->hex_tlb->entries[i], false)) {
+ matches++;
+ last_match = i;
+ }
+ }
+
+ if (matches == 1) {
+ return last_match;
+ }
+ if (matches == 0) {
+ return -2;
+ }
+ return -1;
+}
+
+static inline void print_thread(const char *str, CPUState *cs)
+{
+ g_assert(bql_locked());
+ CPUHexagonState *thread = cpu_env(cs);
+ bool is_stopped = cpu_is_stopped(cs);
+ int exe_mode = get_exe_mode(thread);
+ hex_lock_state_t lock_state = thread->tlb_lock_state;
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: threadId = %d: %s, exe_mode = %s, tlb_lock_state = %s\n",
+ str,
+ thread->threadId,
+ is_stopped ? "stopped" : "running",
+ exe_mode == HEX_EXE_MODE_OFF ? "off" :
+ exe_mode == HEX_EXE_MODE_RUN ? "run" :
+ exe_mode == HEX_EXE_MODE_WAIT ? "wait" :
+ exe_mode == HEX_EXE_MODE_DEBUG ? "debug" :
+ "unknown",
+ lock_state == HEX_LOCK_UNLOCKED ? "unlocked" :
+ lock_state == HEX_LOCK_WAITING ? "waiting" :
+ lock_state == HEX_LOCK_OWNER ? "owner" :
+ "unknown");
+}
+
+static inline void print_thread_states(const char *str)
+{
+ CPUState *cs;
+ CPU_FOREACH(cs) {
+ print_thread(str, cs);
+ }
+}
+
+void hex_tlb_lock(CPUHexagonState *env)
+{
+ qemu_log_mask(CPU_LOG_MMU, "hex_tlb_lock: %d\n", env->threadId);
+ BQL_LOCK_GUARD();
+ g_assert((env->tlb_lock_count == 0) || (env->tlb_lock_count == 1));
+
+ uint32_t syscfg = arch_get_system_reg(env, HEX_SREG_SYSCFG);
+ uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
+ if (tlb_lock) {
+ if (env->tlb_lock_state == HEX_LOCK_QUEUED) {
+ env->next_PC += 4;
+ env->tlb_lock_count++;
+ env->tlb_lock_state = HEX_LOCK_OWNER;
+ SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 1);
+ return;
+ }
+ if (env->tlb_lock_state == HEX_LOCK_OWNER) {
+ qemu_log_mask(CPU_LOG_MMU | LOG_GUEST_ERROR,
+ "Double tlblock at PC: 0x%x, thread may hang\n",
+ env->next_PC);
+ env->next_PC += 4;
+ CPUState *cs = env_cpu(env);
+ cpu_interrupt(cs, CPU_INTERRUPT_HALT);
+ return;
+ }
+ env->tlb_lock_state = HEX_LOCK_WAITING;
+ CPUState *cs = env_cpu(env);
+ cpu_interrupt(cs, CPU_INTERRUPT_HALT);
+ } else {
+ env->next_PC += 4;
+ env->tlb_lock_count++;
+ env->tlb_lock_state = HEX_LOCK_OWNER;
+ SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 1);
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+ qemu_log_mask(CPU_LOG_MMU, "Threads after hex_tlb_lock:\n");
+ print_thread_states("\tThread");
+ }
+}
+
+void hex_tlb_unlock(CPUHexagonState *env)
+{
+ BQL_LOCK_GUARD();
+ g_assert((env->tlb_lock_count == 0) || (env->tlb_lock_count == 1));
+
+ /* Nothing to do if the TLB isn't locked by this thread */
+ uint32_t syscfg = arch_get_system_reg(env, HEX_SREG_SYSCFG);
+ uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
+ if ((tlb_lock == 0) ||
+ (env->tlb_lock_state != HEX_LOCK_OWNER)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "thread %d attempted to tlbunlock without having the "
+ "lock, tlb_lock state = %d\n",
+ env->threadId, env->tlb_lock_state);
+ g_assert(env->tlb_lock_state != HEX_LOCK_WAITING);
+ return;
+ }
+
+ env->tlb_lock_count--;
+ env->tlb_lock_state = HEX_LOCK_UNLOCKED;
+ SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 0);
+
+ /* Look for a thread to unlock */
+ unsigned int this_threadId = env->threadId;
+ CPUHexagonState *unlock_thread = NULL;
+ CPUState *cs;
+ CPU_FOREACH(cs) {
+ CPUHexagonState *thread = cpu_env(cs);
+
+ /*
+ * The hardware implements round-robin fairness, so we look for threads
+ * starting at env->threadId + 1 and incrementing modulo the number of
+ * threads.
+ *
+ * To implement this, we check if thread is earlier in the modulo
+ * sequence than unlock_thread.
+ * if unlock thread is higher than this thread
+ * thread must be between this thread and unlock_thread
+ * else
+ * thread higher than this thread is ahead of unlock_thread
+ * thread must be lower than unlock thread
+ */
+ if (thread->tlb_lock_state == HEX_LOCK_WAITING) {
+ if (!unlock_thread) {
+ unlock_thread = thread;
+ } else if (unlock_thread->threadId > this_threadId) {
+ if (this_threadId < thread->threadId &&
+ thread->threadId < unlock_thread->threadId) {
+ unlock_thread = thread;
+ }
+ } else {
+ if (thread->threadId > this_threadId) {
+ unlock_thread = thread;
+ }
+ if (thread->threadId < unlock_thread->threadId) {
+ unlock_thread = thread;
+ }
+ }
+ }
+ }
+ if (unlock_thread) {
+ cs = env_cpu(unlock_thread);
+ print_thread("\tWaiting thread found", cs);
+ unlock_thread->tlb_lock_state = HEX_LOCK_QUEUED;
+ SET_SYSCFG_FIELD(unlock_thread, SYSCFG_TLBLOCK, 1);
+ cpu_interrupt(cs, CPU_INTERRUPT_TLB_UNLOCK);
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+ qemu_log_mask(CPU_LOG_MMU, "Threads after hex_tlb_unlock:\n");
+ print_thread_states("\tThread");
+ }
+
+}
+
diff --git a/target/hexagon/machine.c b/target/hexagon/machine.c
index 9fdafb4573..fcdbacf9fd 100644
--- a/target/hexagon/machine.c
+++ b/target/hexagon/machine.c
@@ -7,6 +7,33 @@
#include "qemu/osdep.h"
#include "migration/cpu.h"
#include "cpu.h"
+#include "hex_mmu.h"
+
+static int get_hex_tlb_ptr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ CPUHexagonTLBContext *tlb = pv;
+ for (int i = 0; i < ARRAY_SIZE(tlb->entries); i++) {
+ tlb->entries[i] = qemu_get_be64(f);
+ }
+ return 0;
+}
+
+static int put_hex_tlb_ptr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUHexagonTLBContext *tlb = pv;
+ for (int i = 0; i < ARRAY_SIZE(tlb->entries); i++) {
+ qemu_put_be64(f, tlb->entries[i]);
+ }
+ return 0;
+}
+
+const VMStateInfo vmstate_info_hex_tlb_ptr = {
+ .name = "hex_tlb_pointer",
+ .get = get_hex_tlb_ptr,
+ .put = put_hex_tlb_ptr,
+};
const VMStateDescription vmstate_hexagon_cpu = {
@@ -27,6 +54,9 @@ const VMStateDescription vmstate_hexagon_cpu = {
VMSTATE_UINTTL(env.threadId, HexagonCPU),
VMSTATE_UINTTL(env.cause_code, HexagonCPU),
VMSTATE_UINTTL(env.wait_next_pc, HexagonCPU),
+ VMSTATE_POINTER(env.hex_tlb, HexagonCPU, 0,
+ vmstate_info_hex_tlb_ptr, CPUHexagonTLBContext *),
+
VMSTATE_END_OF_LIST()
},
};
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 71c137be30..9119e42ff7 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -944,7 +944,7 @@ static void hexagon_tr_init_disas_context(DisasContextBase *dcbase,
HexagonCPU *hex_cpu = env_archcpu(cpu_env(cs));
uint32_t hex_flags = dcbase->tb->flags;
- ctx->mem_idx = MMU_USER_IDX;
+ ctx->mem_idx = FIELD_EX32(hex_flags, TB_FLAGS, MMU_INDEX);
ctx->num_packets = 0;
ctx->num_insns = 0;
ctx->num_hvx_insns = 0;
diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
index 3ec53010fa..aa729a3683 100644
--- a/target/hexagon/meson.build
+++ b/target/hexagon/meson.build
@@ -273,7 +273,8 @@ hexagon_ss.add(files(
# idef-generated-enabled-instructions
#
idef_parser_enabled = get_option('hexagon_idef_parser')
-if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
+if idef_parser_enabled and ('hexagon-linux-user' in target_dirs or
+ 'hexagon-softmmu' in target_dirs)
idef_parser_input_generated = custom_target(
'idef_parser_input.h.inc',
output: 'idef_parser_input.h.inc',
--
2.34.1
On 1/3/25 06:26, Brian Cain wrote:
> From: Brian Cain <bcain@quicinc.com>
>
> Co-authored-by: Taylor Simpson <ltaylorsimpson@gmail.com>
> Co-authored-by: Michael Lambert <mlambert@quicinc.com>
> Co-authored-by: Sid Manning <sidneym@quicinc.com>
> Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
> Signed-off-by: Brian Cain <brian.cain@oss.qualcomm.com>
> ---
> [...]
> diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
> [...]
> +#ifndef CONFIG_USER_ONLY
> +static void mmu_reset(CPUHexagonState *env)
> +{
> +    CPUState *cs = env_cpu(env);
> +    if (cs->cpu_index == 0) {

This doesn't scale to heterogeneous emulation.

> +        memset(env->hex_tlb, 0, sizeof(*env->hex_tlb));
> +    }
> +}
> +#endif
> [...]
> diff --git a/target/hexagon/hex_mmu.c b/target/hexagon/hex_mmu.c
> [...]
> +typedef enum {
> +    PGSIZE_4K,
> [...]
> +    PGSIZE_1G,
> +    NUM_PGSIZE_TYPES

Is NUM_PGSIZE_TYPES part of the enum?

> +} tlb_pgsize_t;
> [...]
> +static inline uint64_t hex_tlb_virt_addr(uint64_t entry)
> +{
> +    return (uint64_t)GET_TLB_FIELD(entry, PTE_VPN) << TARGET_PAGE_BITS;

return vaddr type?

> +}
> [...]
> +void hex_mmu_realize(CPUHexagonState *env)
> +{
> +    CPUState *cs = env_cpu(env);
> +    if (cs->cpu_index == 0) {

Problem with heterogeneous emulation.

> [...]
> +static inline void print_thread_states(const char *str)
> +{
> +    CPUState *cs;
> +    CPU_FOREACH(cs) {

Ditto heterogeneous emulation.

> [...]
> +    /* Look for a thread to unlock */
> +    unsigned int this_threadId = env->threadId;
> +    CPUHexagonState *unlock_thread = NULL;
> +    CPUState *cs;
> +    CPU_FOREACH(cs) {

Ditto.

> [...]
> -----Original Message-----
> From: Philippe Mathieu-Daudé <philmd@linaro.org>
> Sent: Wednesday, March 12, 2025 2:20 PM
> To: Brian Cain <brian.cain@oss.qualcomm.com>; qemu-devel@nongnu.org
> Cc: richard.henderson@linaro.org; Matheus Bernardino (QUIC)
> <quic_mathbern@quicinc.com>; ale@rev.ng; anjo@rev.ng; Marco Liebel
> (QUIC) <quic_mliebel@quicinc.com>; ltaylorsimpson@gmail.com;
> alex.bennee@linaro.org; Mark Burton (QUIC) <quic_mburton@quicinc.com>;
> Sid Manning <sidneym@quicinc.com>; Brian Cain <bcain@quicinc.com>;
> Michael Lambert <mlambert@quicinc.com>
> Subject: Re: [PATCH 34/38] target/hexagon: Add initial MMU model
>
> On 1/3/25 06:26, Brian Cain wrote:
> > [...]
> > +#ifndef CONFIG_USER_ONLY
> > +static void mmu_reset(CPUHexagonState *env)
> > +{
> > +    CPUState *cs = env_cpu(env);
> > +    if (cs->cpu_index == 0) {
>
> This doesn't scale to heterogeneous emulation.

[Sid Manning] Heterogeneous emulation, you mean a version of QEMU with
something like ARM and Hexagon configured to run concurrently?
I think we can substitute this with env->threadId; threadId is the same as
the htid (hardware thread id).

> > [...]
> > +    PGSIZE_256M,
> > +    PGSIZE_1G,
> > +    NUM_PGSIZE_TYPES
>
> Is NUM_PGSIZE_TYPES part of the enum?

[Sid Manning] Could be,

    #define NUM_PGSIZE_TYPES (SHIFT_1G + 1)

instead.
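For illustration, a minimal sketch of that alternative, assuming the
enumerators keep the PGSIZE_* names used in the patch (SHIFT_1G above
presumably refers to the same last value, PGSIZE_1G):

    typedef enum {
        PGSIZE_4K,
        PGSIZE_16K,
        PGSIZE_64K,
        PGSIZE_256K,
        PGSIZE_1M,
        PGSIZE_4M,
        PGSIZE_16M,
        PGSIZE_64M,
        PGSIZE_256M,
        PGSIZE_1G,
    } tlb_pgsize_t;

    /* Count of valid page-size encodings; deliberately not an enumerator. */
    #define NUM_PGSIZE_TYPES (PGSIZE_1G + 1)

Array sizes such as pgsize_str[NUM_PGSIZE_TYPES] and the bounds check in
hex_tlb_pgsize_type() would be unchanged by this.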
> > [...]
> > +static inline uint64_t hex_tlb_virt_addr(uint64_t entry)
> > +{
> > +    return (uint64_t)GET_TLB_FIELD(entry, PTE_VPN) << TARGET_PAGE_BITS;
>
> return vaddr type?

Yeah, I think so.
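A minimal sketch of that change (assuming no caller depends on the exact
uint64_t return type; vaddr is a 64-bit type, so the PRIx64 format strings in
the dump helpers keep working):

    static inline vaddr hex_tlb_virt_addr(uint64_t entry)
    {
        return (vaddr)GET_TLB_FIELD(entry, PTE_VPN) << TARGET_PAGE_BITS;
    }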
> > [...]
> > +void hex_mmu_realize(CPUHexagonState *env)
> > +{
> > +    CPUState *cs = env_cpu(env);
> > +    if (cs->cpu_index == 0) {
>
> Problem with heterogeneous emulation.
>
> > [...]
> > +static inline void print_thread_states(const char *str)
> > +{
> > +    CPUState *cs;
> > +    CPU_FOREACH(cs) {
>
> Ditto heterogeneous emulation.

OK, we could pass CPUHexagonState and do the following:

static inline void print_thread_states(CPUHexagonState *env, const char *str)
{
    CPUState *cs = env_cpu(env);
    CPU_FOREACH(cs) {
        print_thread(str, cs);
    }
}

> > +        print_thread(str, cs);
> > +    }
> > +}
> > +
> > +void hex_tlb_lock(CPUHexagonState *env) {
> > +    qemu_log_mask(CPU_LOG_MMU, "hex_tlb_lock: %d\n", env->threadId);
> > +    BQL_LOCK_GUARD();
> > +    g_assert((env->tlb_lock_count == 0) || (env->tlb_lock_count == 1));
> > +
> > +    uint32_t syscfg = arch_get_system_reg(env, HEX_SREG_SYSCFG);
> > +    uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
> > +    if (tlb_lock) {
> > +        if (env->tlb_lock_state == HEX_LOCK_QUEUED) {
> > +            env->next_PC += 4;
> > +            env->tlb_lock_count++;
> > +            env->tlb_lock_state = HEX_LOCK_OWNER;
> > +            SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 1);
> > +            return;
> > +        }
> > +        if (env->tlb_lock_state == HEX_LOCK_OWNER) {
> > +            qemu_log_mask(CPU_LOG_MMU | LOG_GUEST_ERROR,
> > +                          "Double tlblock at PC: 0x%x, thread may hang\n",
> > +                          env->next_PC);
> > +            env->next_PC += 4;
> > +            CPUState *cs = env_cpu(env);
> > +            cpu_interrupt(cs, CPU_INTERRUPT_HALT);
> > +            return;
> > +        }
> > +        env->tlb_lock_state = HEX_LOCK_WAITING;
> > +        CPUState *cs = env_cpu(env);
> > +        cpu_interrupt(cs, CPU_INTERRUPT_HALT);
> > +    } else {
> > +        env->next_PC += 4;
> > +        env->tlb_lock_count++;
> > +        env->tlb_lock_state = HEX_LOCK_OWNER;
> > +        SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 1);
> > +    }
> > +
> > +    if (qemu_loglevel_mask(CPU_LOG_MMU)) {
> > +        qemu_log_mask(CPU_LOG_MMU, "Threads after hex_tlb_lock:\n");
> > +        print_thread_states("\tThread");
> > +    }
> > +}
> > +
> > +void hex_tlb_unlock(CPUHexagonState *env) {
> > +    BQL_LOCK_GUARD();
> > +    g_assert((env->tlb_lock_count == 0) || (env->tlb_lock_count == 1));
> > +
> > +    /* Nothing to do if the TLB isn't locked by this thread */
> > +    uint32_t syscfg = arch_get_system_reg(env, HEX_SREG_SYSCFG);
> > +    uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
> > +    if ((tlb_lock == 0) ||
> > +        (env->tlb_lock_state != HEX_LOCK_OWNER)) {
> > +        qemu_log_mask(LOG_GUEST_ERROR,
> > +                      "thread %d attempted to tlbunlock without having the "
> > +                      "lock, tlb_lock state = %d\n",
> > +                      env->threadId, env->tlb_lock_state);
> > +        g_assert(env->tlb_lock_state != HEX_LOCK_WAITING);
> > +        return;
> > +    }
> > +
> > +    env->tlb_lock_count--;
> > +    env->tlb_lock_state = HEX_LOCK_UNLOCKED;
> > +    SET_SYSCFG_FIELD(env, SYSCFG_TLBLOCK, 0);
> > +
> > +    /* Look for a thread to unlock */
> > +    unsigned int this_threadId = env->threadId;
> > +    CPUHexagonState *unlock_thread = NULL;
> > +    CPUState *cs;
> > +    CPU_FOREACH(cs) {
> >
> > Ditto.
> >
> > +        CPUHexagonState *thread = cpu_env(cs);
> > +
> > +        /*
> > +         * The hardware implements round-robin fairness, so we look for
> > +         * threads starting at env->threadId + 1 and incrementing modulo
> > +         * the number of threads.
> > +         *
> > +         * To implement this, we check if thread is earlier in the modulo
> > +         * sequence than unlock_thread.
> > +         *     if unlock thread is higher than this thread
> > +         *         thread must be between this thread and unlock_thread
> > +         *     else
> > +         *         thread higher than this thread is ahead of unlock_thread
> > +         *         thread must be lower than unlock thread
> > +         */
> > +        if (thread->tlb_lock_state == HEX_LOCK_WAITING) {
> > +            if (!unlock_thread) {
> > +                unlock_thread = thread;
> > +            } else if (unlock_thread->threadId > this_threadId) {
> > +                if (this_threadId < thread->threadId &&
> > +                    thread->threadId < unlock_thread->threadId) {
> > +                    unlock_thread = thread;
> > +                }
> > +            } else {
> > +                if (thread->threadId > this_threadId) {
> > +                    unlock_thread = thread;
> > +                }
> > +                if (thread->threadId < unlock_thread->threadId) {
> > +                    unlock_thread = thread;
> > +                }
> > +            }
> > +        }
> > +    }
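
For illustration only, the round-robin rule described in that comment amounts to picking the waiting thread with the smallest non-zero distance (modulo the thread count) from the unlocking thread. A minimal sketch of that formulation, as a hypothetical helper that is not part of the patch (it assumes the caller knows the total thread count, holds the BQL, and has the same CPU_FOREACH caveat discussed above):

static CPUHexagonState *pick_unlock_thread(CPUHexagonState *env,
                                           unsigned int nthreads)
{
    CPUHexagonState *best = NULL;
    unsigned int best_dist = UINT_MAX;    /* from <limits.h> */
    CPUState *cs;

    CPU_FOREACH(cs) {
        CPUHexagonState *thread = cpu_env(cs);
        if (thread->tlb_lock_state != HEX_LOCK_WAITING) {
            continue;
        }
        /* Distance from the unlocking thread, wrapping modulo nthreads */
        unsigned int dist =
            (thread->threadId - env->threadId + nthreads) % nthreads;
        if (dist != 0 && dist < best_dist) {
            best_dist = dist;
            best = thread;
        }
    }
    return best;
}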
On 12/3/25 22:15, Sid Manning wrote:
>
>> -----Original Message-----
>> From: Philippe Mathieu-Daudé <philmd@linaro.org>
>> Sent: Wednesday, March 12, 2025 2:20 PM
>> To: Brian Cain <brian.cain@oss.qualcomm.com>; qemu-devel@nongnu.org
>> Cc: richard.henderson@linaro.org; Matheus Bernardino (QUIC)
>> <quic_mathbern@quicinc.com>; ale@rev.ng; anjo@rev.ng;
>> Marco Liebel (QUIC) <quic_mliebel@quicinc.com>; ltaylorsimpson@gmail.com;
>> alex.bennee@linaro.org; Mark Burton (QUIC) <quic_mburton@quicinc.com>;
>> Sid Manning <sidneym@quicinc.com>; Brian Cain <bcain@quicinc.com>;
>> Michael Lambert <mlambert@quicinc.com>
>> Subject: Re: [PATCH 34/38] target/hexagon: Add initial MMU model
>>
>> WARNING: This email originated from outside of Qualcomm. Please be wary
>> of any links or attachments, and do not enable macros.
>>
>> On 1/3/25 06:26, Brian Cain wrote:
>>> From: Brian Cain <bcain@quicinc.com>
>>>
>>> Co-authored-by: Taylor Simpson <ltaylorsimpson@gmail.com>
>>> Co-authored-by: Michael Lambert <mlambert@quicinc.com>
>>> Co-authored-by: Sid Manning <sidneym@quicinc.com>
>>> Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
>>> Signed-off-by: Brian Cain <brian.cain@oss.qualcomm.com>
>>> ---
>>>   target/hexagon/cpu-param.h |   4 +
>>>   target/hexagon/cpu.h       |  13 +
>>>   target/hexagon/hex_mmu.h   |  30 +++
>>>   target/hexagon/internal.h  |   3 +
>>>   target/hexagon/cpu.c       |  27 +-
>>>   target/hexagon/hex_mmu.c   | 528 +++++++++++++++++++++++++++++++++++++
>>>   target/hexagon/machine.c   |  30 +++
>>>   target/hexagon/translate.c |   2 +-
>>>   target/hexagon/meson.build |   3 +-
>>>   9 files changed, 637 insertions(+), 3 deletions(-)
>>>   create mode 100644 target/hexagon/hex_mmu.h
>>>   create mode 100644 target/hexagon/hex_mmu.c
>>
>>
>>> diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
>>> index 34c39cecd9..7ff678195d 100644
>>> --- a/target/hexagon/cpu.c
>>> +++ b/target/hexagon/cpu.c
>>> @@ -28,6 +28,7 @@
>>>   #include "exec/gdbstub.h"
>>>   #include "cpu_helper.h"
>>>   #include "max.h"
>>> +#include "hex_mmu.h"
>>>
>>>   #ifndef CONFIG_USER_ONLY
>>>   #include "sys_macros.h"
>>> @@ -283,6 +284,18 @@ static void hexagon_restore_state_to_opc(CPUState *cs,
>>>       cpu_env(cs)->gpr[HEX_REG_PC] = data[0];
>>>   }
>>>
>>> +
>>> +#ifndef CONFIG_USER_ONLY
>>> +static void mmu_reset(CPUHexagonState *env) {
>>> +    CPUState *cs = env_cpu(env);
>>> +    if (cs->cpu_index == 0) {
>>
>> This doesn't scale to heterogeneous emulation.
>
> [Sid Manning]
> Heterogeneous emulation, you mean a version of QEMU with something like
> ARM and Hexagon configured to run concurrently?

Yes.

> I think we can substitute this with env->threadId; threadId is the same
> as the htid (hardware thread id).

That looks safer (and could be your CPUClass::get_arch_id implementation).
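
For reference, the hook mentioned here does exist on CPUClass (int64_t (*get_arch_id)(CPUState *cs)), so that direction might look roughly like the sketch below. The class_init wiring is only illustrative, and it assumes env.threadId is already populated by the time the hook can be called:

static int64_t hexagon_cpu_get_arch_id(CPUState *cs)
{
    HexagonCPU *cpu = HEXAGON_CPU(cs);

    /* Assumes threadId mirrors the hardware thread id (htid) */
    return cpu->env.threadId;
}

static void hexagon_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->get_arch_id = hexagon_cpu_get_arch_id;
    /* ... rest of the existing class init ... */
}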
> -----Original Message-----
> From: Brian Cain <brian.cain@oss.qualcomm.com>
> Sent: Friday, February 28, 2025 11:26 PM
> To: qemu-devel@nongnu.org
> Cc: brian.cain@oss.qualcomm.com; richard.henderson@linaro.org;
> philmd@linaro.org; quic_mathbern@quicinc.com; ale@rev.ng; anjo@rev.ng;
> quic_mliebel@quicinc.com; ltaylorsimpson@gmail.com;
> alex.bennee@linaro.org; quic_mburton@quicinc.com; sidneym@quicinc.com;
> Brian Cain <bcain@quicinc.com>; Michael Lambert <mlambert@quicinc.com>
> Subject: [PATCH 34/38] target/hexagon: Add initial MMU model
>
> From: Brian Cain <bcain@quicinc.com>
>
> Co-authored-by: Taylor Simpson <ltaylorsimpson@gmail.com>
> Co-authored-by: Michael Lambert <mlambert@quicinc.com>
> Co-authored-by: Sid Manning <sidneym@quicinc.com>
> Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
> Signed-off-by: Brian Cain <brian.cain@oss.qualcomm.com>

> diff --git a/target/hexagon/hex_mmu.c b/target/hexagon/hex_mmu.c
> new file mode 100644
> index 0000000000..54c4ba2dbf
> --- /dev/null
> +++ b/target/hexagon/hex_mmu.c
> @@ -0,0 +1,528 @@
> +/*
> + * Copyright(c) 2019-2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#include "qemu/osdep.h"
> +#include "qemu/main-loop.h"
> +#include "qemu/qemu-print.h"
> +#include "cpu.h"
> +#include "system/cpus.h"
> +#include "internal.h"
> +#include "exec/exec-all.h"
> +#include "hex_mmu.h"
> +#include "macros.h"
> +#include "sys_macros.h"
> +#include "reg_fields.h"
> +
> +#define GET_TLB_FIELD(ENTRY, FIELD) \
> +    ((uint64_t)fEXTRACTU_BITS(ENTRY, reg_field_info[FIELD].width, \
> +                              reg_field_info[FIELD].offset))
> +
> +/* PPD (physical page descriptor) */
> +static inline uint64_t GET_PPD(uint64_t entry) {
> +    return GET_TLB_FIELD(entry, PTE_PPD) |
> +           (GET_TLB_FIELD(entry, PTE_PA35) << reg_field_info[PTE_PPD].width);
> +}
> +
> +#define NO_ASID (1 << 8)
> +
> +typedef enum {
> +    PGSIZE_4K,
> +    PGSIZE_16K,
> +    PGSIZE_64K,
> +    PGSIZE_256K,
> +    PGSIZE_1M,
> +    PGSIZE_4M,
> +    PGSIZE_16M,
> +    PGSIZE_64M,
> +    PGSIZE_256M,
> +    PGSIZE_1G,
> +    NUM_PGSIZE_TYPES
> +} tlb_pgsize_t;
> +
> +static const char *pgsize_str[NUM_PGSIZE_TYPES] = {
> +    "4K",
> +    "16K",
> +    "64K",
> +    "256K",
> +    "1M",
> +    "4M",
> +    "16M",
> +    "64M",
> +    "256M",
> +    "1G",
> +};
> +
> +#define INVALID_MASK 0xffffffffLL
> +
> +static const uint64_t encmask_2_mask[] = {
> +    0x0fffLL,         /* 4k,   0000 */
> +    0x3fffLL,         /* 16k,  0001 */
> +    0xffffLL,         /* 64k,  0010 */
> +    0x3ffffLL,        /* 256k, 0011 */
> +    0xfffffLL,        /* 1m,   0100 */
> +    0x3fffffLL,       /* 4m,   0101 */
> +    0xffffffLL,       /* 16m,  0110 */
> +    0x3ffffffLL,      /* 64m,  0111 */
> +    0xfffffffLL,      /* 256m, 1000 */
> +    0x3fffffffLL,     /* 1g,   1001 */
> +    INVALID_MASK,     /* RSVD, 0111 */
> +};
> +
> +/*
> + * @return the page size type from @a entry.
> + */
> +static inline tlb_pgsize_t hex_tlb_pgsize_type(uint64_t entry) {
> +    if (entry == 0) {
> +        qemu_log_mask(CPU_LOG_MMU, "%s: Supplied TLB entry was 0!\n", __func__);
> +        return 0;
> +    }
> +    tlb_pgsize_t size = ctz64(entry);
> +    g_assert(size < NUM_PGSIZE_TYPES);
> +    return size;
> +}
> +
> +/*
> + * @return the page size of @a entry, in bytes.
> + */
> +static inline uint64_t hex_tlb_page_size_bytes(uint64_t entry) {
> +    return 1ull << (TARGET_PAGE_BITS + 2 * hex_tlb_pgsize_type(entry));
> +}
> +
> +static inline uint64_t hex_tlb_phys_page_num(uint64_t entry) {
> +    uint32_t ppd = GET_PPD(entry);
> +    return ppd >> 1;
> +}
> +
> +static inline uint64_t hex_tlb_phys_addr(uint64_t entry) {
> +    uint64_t pagemask = encmask_2_mask[hex_tlb_pgsize_type(entry)];
> +    uint64_t pagenum = hex_tlb_phys_page_num(entry);
> +    uint64_t PA = (pagenum << TARGET_PAGE_BITS) & (~pagemask);
> +    return PA;
> +}
> +
> +static inline uint64_t hex_tlb_virt_addr(uint64_t entry) {
> +    return (uint64_t)GET_TLB_FIELD(entry, PTE_VPN) << TARGET_PAGE_BITS;
> +}
> +
> +static bool hex_dump_mmu_entry(FILE *f, uint64_t entry) {
> +    if (GET_TLB_FIELD(entry, PTE_V)) {
> +        fprintf(f, "0x%016" PRIx64 ": ", entry);
> +        uint64_t PA = hex_tlb_phys_addr(entry);
> +        uint64_t VA = hex_tlb_virt_addr(entry);
> +        fprintf(f, "V:%" PRId64 " G:%" PRId64 " A1:%" PRId64 " A0:%" PRId64,
> +                GET_TLB_FIELD(entry, PTE_V), GET_TLB_FIELD(entry, PTE_G),
> +                GET_TLB_FIELD(entry, PTE_ATR1), GET_TLB_FIELD(entry, PTE_ATR0));
> +        fprintf(f, " ASID:0x%02" PRIx64 " VA:0x%08" PRIx64,
> +                GET_TLB_FIELD(entry, PTE_ASID), VA);
> +        fprintf(f,
> +                " X:%" PRId64 " W:%" PRId64 " R:%" PRId64 " U:%" PRId64
> +                " C:%" PRId64,
> +                GET_TLB_FIELD(entry, PTE_X), GET_TLB_FIELD(entry, PTE_W),
> +                GET_TLB_FIELD(entry, PTE_R), GET_TLB_FIELD(entry, PTE_U),
> +                GET_TLB_FIELD(entry, PTE_C));
> +        fprintf(f, " PA:0x%09" PRIx64 " SZ:%s (0x%" PRIx64 ")", PA,
> +                pgsize_str[hex_tlb_pgsize_type(entry)],
> +                hex_tlb_page_size_bytes(entry));
> +        fprintf(f, "\n");
> +        return true;
> +    }
> +
> +    /* Not valid */
> +    return false;
> +}
> +
> +void dump_mmu(CPUHexagonState *env)
> +{
> +    int i;
> +
> +    HexagonCPU *cpu = env_archcpu(env);
> +    for (i = 0; i < cpu->num_tlbs; i++) {
> +        uint64_t entry = env->hex_tlb->entries[i];
> +        if (GET_TLB_FIELD(entry, PTE_V)) {
> +            qemu_printf("0x%016" PRIx64 ": ", entry);
> +            uint64_t PA = hex_tlb_phys_addr(entry);
> +            uint64_t VA = hex_tlb_virt_addr(entry);
> +            qemu_printf(
> +                "V:%" PRId64 " G:%" PRId64 " A1:%" PRId64 " A0:%" PRId64,
> +                GET_TLB_FIELD(entry, PTE_V), GET_TLB_FIELD(entry, PTE_G),
> +                GET_TLB_FIELD(entry, PTE_ATR1), GET_TLB_FIELD(entry, PTE_ATR0));
> +            qemu_printf(" ASID:0x%02" PRIx64 " VA:0x%08" PRIx64,
> +                        GET_TLB_FIELD(entry, PTE_ASID), VA);
> +            qemu_printf(
> +                " X:%" PRId64 " W:%" PRId64 " R:%" PRId64 " U:%" PRId64
> +                " C:%" PRId64,
> +                GET_TLB_FIELD(entry, PTE_X), GET_TLB_FIELD(entry, PTE_W),
> +                GET_TLB_FIELD(entry, PTE_R), GET_TLB_FIELD(entry, PTE_U),
> +                GET_TLB_FIELD(entry, PTE_C));
> +            qemu_printf(" PA:0x%09" PRIx64 " SZ:%s (0x%" PRIx64 ")", PA,
> +                        pgsize_str[hex_tlb_pgsize_type(entry)],
> +                        hex_tlb_page_size_bytes(entry));
> +            qemu_printf("\n");

Use hex_dump_mmu_entry instead.
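
Following that suggestion, the loop in dump_mmu() could collapse to something like the sketch below. This assumes the dump may go to a stdio stream; since qemu_printf() can redirect to the monitor, the real change would probably want hex_dump_mmu_entry() to take a printf-like callback (or build a GString) rather than a FILE *:

void dump_mmu(CPUHexagonState *env)
{
    HexagonCPU *cpu = env_archcpu(env);

    for (int i = 0; i < cpu->num_tlbs; i++) {
        /* hex_dump_mmu_entry() already skips invalid entries */
        hex_dump_mmu_entry(stdout, env->hex_tlb->entries[i]);
    }
}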
> +        }
> +    }
> +}
> +
> +static inline void hex_log_tlbw(uint32_t index, uint64_t entry) {
> +    if (qemu_loglevel_mask(CPU_LOG_MMU)) {
> +        if (qemu_log_enabled()) {
> +            FILE *logfile = qemu_log_trylock();
> +            if (logfile) {
> +                fprintf(logfile, "tlbw[%03d]: ", index);
> +                if (!hex_dump_mmu_entry(logfile, entry)) {
> +                    fprintf(logfile, "invalid\n");
> +                }
> +                qemu_log_unlock(logfile);
> +            }
> +        }
> +    }
> +}
> +
> +void hex_tlbw(CPUHexagonState *env, uint32_t index, uint64_t value) {
> +    uint32_t myidx = fTLB_NONPOW2WRAP(fTLB_IDXMASK(index));
> +    bool old_entry_valid = GET_TLB_FIELD(env->hex_tlb->entries[myidx], PTE_V);
> +    if (old_entry_valid && hexagon_cpu_mmu_enabled(env)) {
> +        CPUState *cs = env_cpu(env);
> +
> +        tlb_flush(cs);
> +    }
> +    env->hex_tlb->entries[myidx] = (value);
> +    hex_log_tlbw(myidx, value);
> +}
> +
> +void hex_mmu_realize(CPUHexagonState *env) {
> +    CPUState *cs = env_cpu(env);
> +    if (cs->cpu_index == 0) {
> +        env->hex_tlb = g_malloc0(sizeof(CPUHexagonTLBContext));
> +    } else {
> +        CPUState *cpu0_s = NULL;
> +        CPUHexagonState *env0 = NULL;
> +        CPU_FOREACH(cpu0_s) {
> +            assert(cpu0_s->cpu_index == 0);
> +            env0 = &(HEXAGON_CPU(cpu0_s)->env);
> +            break;
> +        }

Seems fragile to assume cpu_index == 0 will be first in CPU_FOREACH. This
would be better:

    CPU_FOREACH(cpu0_s) {
        if (cpu0_s->cpu_index == 0) {
            env0 = &(HEXAGON_CPU(cpu0_s)->env);
            break;
        }
    }
    g_assert(env0);    /* Make sure we found it */

> +        env->hex_tlb = env0->hex_tlb;
> +    }
> +}

> diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
> index 3ec53010fa..aa729a3683 100644
> --- a/target/hexagon/meson.build
> +++ b/target/hexagon/meson.build
> @@ -273,7 +273,8 @@ hexagon_ss.add(files(
>   # idef-generated-enabled-instructions
>   #
>   idef_parser_enabled = get_option('hexagon_idef_parser')
> -if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
> +if idef_parser_enabled and ('hexagon-linux-user' in target_dirs or
> +                            'hexagon-softmmu' in target_dirs)
>     idef_parser_input_generated = custom_target(
>       'idef_parser_input.h.inc',
>       output: 'idef_parser_input.h.inc',

Move this to the later patch, "add build config for softmmu".
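
Coming back to the hex_mmu_realize() comment above, folding that suggested lookup in would give roughly the sketch below. It keeps the cpu_index == 0 ownership model, which the other sub-thread notes may need to change (e.g. to threadId) for heterogeneous emulation:

void hex_mmu_realize(CPUHexagonState *env)
{
    CPUState *cs = env_cpu(env);

    if (cs->cpu_index == 0) {
        env->hex_tlb = g_malloc0(sizeof(CPUHexagonTLBContext));
    } else {
        CPUHexagonState *env0 = NULL;
        CPUState *cpu0_s;

        /* Find the CPU that owns the shared TLB context */
        CPU_FOREACH(cpu0_s) {
            if (cpu0_s->cpu_index == 0) {
                env0 = &(HEXAGON_CPU(cpu0_s)->env);
                break;
            }
        }
        g_assert(env0);    /* Make sure we found it */
        env->hex_tlb = env0->hex_tlb;
    }
}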