With hardware PTW (page table walk) supported, bit A will be set on read
access or instruction fetch, and bit D will be set on write access.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
target/loongarch/cpu-mmu.h | 26 ++++++++++++++++++++
target/loongarch/cpu_helper.c | 45 +++++++++++++++++++++++++++++++----
2 files changed, 67 insertions(+), 4 deletions(-)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 2e63324abe..cb596dd57a 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -61,6 +61,32 @@ static inline bool pte_write(CPULoongArchState *env, uint64_t entry)
return !!writable;
}
+/*
+ * The following functions should be called only after checking that PTW
+ * is enabled. With hardware PTW enabled:
+ * Bit D will be set by hardware on write access
+ * Bit A will be set by hardware on read/instruction fetch access
+ */
+static inline uint64_t pte_mkaccess(uint64_t entry)
+{
+ return FIELD_DP64(entry, TLBENTRY, V, 1);
+}
+
+static inline uint64_t pte_mkdirty(uint64_t entry)
+{
+ return FIELD_DP64(entry, TLBENTRY, D, 1);
+}
+
+static inline bool pte_access(uint64_t entry)
+{
+ return !!FIELD_EX64(entry, TLBENTRY, V);
+}
+
+static inline bool pte_dirty(uint64_t entry)
+{
+ return !!FIELD_EX64(entry, TLBENTRY, D);
+}
+
bool check_ps(CPULoongArchState *ent, uint8_t ps);
TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx);
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 79c994255f..7b0bac3dc9 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -111,11 +111,12 @@ TLBRet loongarch_page_table_walker(CPULoongArchState *env,
int access_type, int mmu_idx)
{
CPUState *cs = env_cpu(env);
- target_ulong index, phys;
+ target_ulong index, phys = 0;
uint64_t dir_base, dir_width;
- uint64_t base;
+ uint64_t base, pte;
int level;
vaddr address;
+ TLBRet ret;
address = context->addr;
if ((address >> 63) & 0x1) {
@@ -145,6 +146,7 @@ TLBRet loongarch_page_table_walker(CPULoongArchState *env,
/* pte */
if (FIELD_EX64(base, TLBENTRY, HUGE)) {
/* Huge Page. base is pte */
+ pte = base;
base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
@@ -156,12 +158,13 @@ TLBRet loongarch_page_table_walker(CPULoongArchState *env,
context->pte_buddy[0] = base;
context->pte_buddy[1] = base + BIT_ULL(dir_base);
base += (BIT_ULL(dir_base) & address);
+ index = 0;
} else {
/* Normal Page. base points to pte */
get_dir_base_width(env, &dir_base, &dir_width, 0);
index = (address >> dir_base) & ((1 << dir_width) - 1);
phys = base | index << 3;
- base = ldq_phys(cs->as, phys);
+ base = pte = ldq_phys(cs->as, phys);
if (cpu_has_ptw(env)) {
index &= 1;
context->pte_buddy[index] = base;
@@ -172,7 +175,41 @@ TLBRet loongarch_page_table_walker(CPULoongArchState *env,
context->ps = dir_base;
context->pte = base;
- return loongarch_check_pte(env, context, access_type, mmu_idx);
+ ret = loongarch_check_pte(env, context, access_type, mmu_idx);
+
+ /*
+ * Update bit A/D with hardware PTW supported
+ *
+ * FIXME: the pte update needs an atomic cmpxchg operation; other vCPUs
+ * may update the pte at the same time.
+ */
+ if (ret == TLBRET_MATCH && cpu_has_ptw(env)) {
+ if (access_type == MMU_DATA_STORE && pte_dirty(base)) {
+ return ret;
+ }
+
+ if (access_type != MMU_DATA_STORE && pte_access(base)) {
+ return ret;
+ }
+
+ base = pte_mkaccess(pte);
+ context->pte_buddy[index] = pte_mkaccess(context->pte_buddy[index]);
+ if (access_type == MMU_DATA_STORE) {
+ base = pte_mkdirty(base);
+ context->pte_buddy[index] = pte_mkdirty(context->pte_buddy[index]);
+ }
+ stq_phys(cs->as, phys, base);
+
+ /* Bit A/D need be updated with both Even/Odd page with huge pte */
+ if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+ context->pte_buddy[1] = pte_mkaccess(context->pte_buddy[1]);
+ if (access_type == MMU_DATA_STORE) {
+ context->pte_buddy[1] = pte_mkdirty(context->pte_buddy[1]);
+ }
+ }
+ }
+
+ return ret;
}
static TLBRet loongarch_map_address(CPULoongArchState *env,
--
2.39.3