The concept of mmu idx between QEMU TLB and LoongArch TLB emulation
is different:
mmu idx in QEMU TLB comes from currently working mode of vCPU
mmu idx in LoongArch TLB is the page privilege level
When copy_from_user() is executed in system emulation mode, the vCPU is
in kernel mode; however, the PLV of the PTE is user mode.
Here a field KM is added to TLB MISC to track whether the lo0/lo1 PTE
entry is accessed in kernel mode. If it is set, when the LoongArch TLB is
flushed, the QEMU TLB must also be flushed with mmu idx MMU_KERNEL_IDX.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
target/loongarch/cpu-mmu.h | 2 ++
target/loongarch/cpu.h | 11 +++++++++++
target/loongarch/cpu_helper.c | 3 +++
target/loongarch/tcg/tlb_helper.c | 26 ++++++++++++++++++++++++++
4 files changed, 42 insertions(+)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 9d909d36ec..3de707dd56 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -25,6 +25,8 @@ typedef struct mmu_context {
hwaddr physical;
int ps; /* page size shift */
int prot;
+ int tlb_index;
+ int mmu_index;
} mmu_context;
bool check_ps(CPULoongArchState *ent, uint8_t ps);
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index 7731f6acdc..2ae5f655a6 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -253,6 +253,17 @@ FIELD(TLB_MISC, E, 0, 1)
FIELD(TLB_MISC, ASID, 1, 10)
FIELD(TLB_MISC, VPPN, 13, 35)
FIELD(TLB_MISC, PS, 48, 6)
+/*
+ * Used by QEMU software, concept of mmu idx between QEMU TLB and LoongArch
+ * TLB emulation is different:
+ * mmu idx in QEMU TLB is current working mode of vCPU
+ * mmu idx in LoongArch TLB is PLV access level
+ * When function copy_from_user() is executed with system emulation,
+ * the vCPU is in kernel mode but the accessed address is user memory.
+ *
+ * Mask marking TLB lo0/lo1 entries with PLV MMU_USER_IDX accessed in kernel mode
+ */
+FIELD(TLB_MISC, KM, 54, 2)
#define LSX_LEN (128)
#define LASX_LEN (256)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 9f64cb40cf..afdf05649b 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -95,6 +95,7 @@ int loongarch_check_pte(CPULoongArchState *env, mmu_context *context,
context->physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
(context->vaddr & MAKE_64BIT_MASK(0, tlb_ps));
context->prot = PAGE_READ;
+ context->mmu_index = tlb_plv;
if (tlb_d) {
context->prot |= PAGE_WRITE;
}
@@ -217,6 +218,7 @@ int get_physical_address(CPULoongArchState *env, mmu_context *context,
if (da & !pg) {
context->physical = address & TARGET_PHYS_MASK;
context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ context->mmu_index = MMU_DA_IDX;
return TLBRET_MATCH;
}
@@ -236,6 +238,7 @@ int get_physical_address(CPULoongArchState *env, mmu_context *context,
if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
context->physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ context->mmu_index = MMU_DA_IDX;
return TLBRET_MATCH;
}
}
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index d1d19c5e70..715c5a20da 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -172,6 +172,7 @@ static void fill_tlb_entry(CPULoongArchState *env, int index)
}
/* Store page size in field PS */
+ tlb->tlb_misc = 0;
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
@@ -510,6 +511,24 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
tlb_flush(env_cpu(env));
}
+/*
+ * Record that the TLB entry mapping a user-mode virtual address was
+ * accessed from vCPU kernel mode.
+ *
+ * If this is set, when the LoongArch TLB is flushed, the QEMU TLB must
+ * also be flushed with mmu idx MMU_KERNEL_IDX.
+ */
+static inline void tlb_set_accessed(CPULoongArchState *env, vaddr address,
+ int index)
+{
+ LoongArchTLB *tlb = &env->tlb[index];
+ uint8_t tlb_ps, n;
+
+ tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
+ n = (address >> tlb_ps) & 0x1;/* Odd or even */
+ tlb->tlb_misc |= BIT_ULL(R_TLB_MISC_KM_SHIFT + n);
+}
+
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -529,6 +548,12 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
+
+ /* user mode address space is accessed in vCPU kernel mode */
+ if (mmu_idx == MMU_KERNEL_IDX && context.mmu_index == MMU_USER_IDX) {
+ tlb_set_accessed(env, address, context.tlb_index);
+ }
+
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
" prot %d\n", __func__, address, physical, prot);
@@ -665,6 +690,7 @@ static int loongarch_map_tlb_entry(CPULoongArchState *env, mmu_context *context,
tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
context->ps = tlb_ps;
context->pte = tlb_entry;
+ context->tlb_index = index;
return loongarch_check_pte(env, context, access_type, mmu_idx);
}
--
2.39.3