[PATCH] feat: add loongarch page table walker support for debugger memory access

Miao Hao posted 1 patch 3 months, 2 weeks ago
There is a newer version of this series
[PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 3 months, 2 weeks ago
Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
---
 target/loongarch/cpu_helper.c     | 104 ++++++++++++++++++++++++++++--
 target/loongarch/internals.h      |   4 +-
 target/loongarch/tcg/tlb_helper.c |   4 +-
 3 files changed, 104 insertions(+), 8 deletions(-)

diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 580362ac3e..c0828a813d 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
     return false;
 }
 
+static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
+                                 int *prot, target_ulong address)
+{
+    CPUState *cs = env_cpu(env);
+    target_ulong index, phys;
+    int shift;
+    uint64_t dir_base, dir_width;
+    uint64_t base;
+    int level;
+
+    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
+    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
+    shift = (shift + 1) * 3;
+
+    if ((address >> 63) & 0x1) {
+        base = env->CSR_PGDH;
+    } else {
+        base = env->CSR_PGDL;
+    }
+    base &= TARGET_PHYS_MASK;
+
+    for (level = 4; level > 0; level--) {
+        get_dir_base_width(env, &dir_base, &dir_width, level);
+
+        if (dir_width != 0) {
+            /* get next level page directory */
+            index = (address >> dir_base) & ((1 << dir_width) - 1);
+            phys = base | index << shift;
+            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
+            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
+                /* mask off page dir permission bits */
+                base &= TARGET_PAGE_MASK;
+            } else {
+                /* base is a huge pte */
+                break;
+            }
+
+            if (base == 0) {
+                return TLBRET_NOMATCH;
+            }
+        }
+    }
+
+    /* pte */
+    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+        /* Huge Page. base is pte */
+        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
+        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
+        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
+            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
+            base = FIELD_DP64(base, TLBENTRY, G, 1);
+        }
+    } else {
+        /* Normal Page. base points to pte */
+        get_dir_base_width(env, &dir_base, &dir_width, 0);
+        index = (address >> dir_base) & ((1 << dir_width) - 1);
+        phys = base | index << shift;
+        base = ldq_phys(cs->as, phys);
+    }
+
+    /* TODO: check plv and other bits? */
+
+    /* base is pte, in normal pte format */
+    if (!FIELD_EX64(base, TLBENTRY, V)) {
+        return TLBRET_NOMATCH;
+    }
+
+    if (!FIELD_EX64(base, TLBENTRY, D)) {
+        *prot = PAGE_READ;
+    } else {
+        *prot = PAGE_READ | PAGE_WRITE;
+    }
+
+    /* get TARGET_PAGE_SIZE aligned physical address */
+    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
+    /* mask RPLV, NX, NR bits */
+    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
+    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
+    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
+    /* mask other attribute bits */
+    *physical = base & TARGET_PAGE_MASK;
+
+    return 0;
+}
+
 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                  int *prot, target_ulong address,
-                                 MMUAccessType access_type, int mmu_idx)
+                                 MMUAccessType access_type, int mmu_idx,
+                                 int is_debug)
 {
     int index, match;
 
@@ -151,6 +237,13 @@ static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
     if (match) {
         return loongarch_map_tlb_entry(env, physical, prot,
                                        address, access_type, index, mmu_idx);
+    } else if (is_debug) {
+        /*
+         * For debugger memory access, we want to do the map when there is a
+         * legal mapping, even if the mapping is not yet in the TLB. Return 0
+         * if there is a valid mapping, else nonzero.
+         */
+        return loongarch_page_table_walker(env, physical, prot, address);
     }
 
     return TLBRET_NOMATCH;
@@ -158,7 +251,8 @@ static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
 #else
 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                  int *prot, target_ulong address,
-                                 MMUAccessType access_type, int mmu_idx)
+                                 MMUAccessType access_type, int mmu_idx,
+                                 int is_debug)
 {
     return TLBRET_NOMATCH;
 }
@@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
 
 int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                          int *prot, target_ulong address,
-                         MMUAccessType access_type, int mmu_idx)
+                         MMUAccessType access_type, int mmu_idx, int is_debug)
 {
     int user_mode = mmu_idx == MMU_USER_IDX;
     int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
@@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical,
 
     /* Mapped address */
     return loongarch_map_address(env, physical, prot, address,
-                                 access_type, mmu_idx);
+                                 access_type, mmu_idx, is_debug);
 }
 
 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -232,7 +326,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     int prot;
 
     if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
-                             cpu_mmu_index(cs, false)) != 0) {
+                             cpu_mmu_index(cs, false), 1) != 0) {
         return -1;
     }
     return phys_addr;
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index 1a02427627..bc2ca30746 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -56,7 +56,9 @@ bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                           int *index);
 int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                          int *prot, target_ulong address,
-                         MMUAccessType access_type, int mmu_idx);
+                         MMUAccessType access_type, int mmu_idx, int is_debug);
+void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
+                               uint64_t *dir_width, target_ulong level);
 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 
 #ifdef CONFIG_TCG
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 97f38fc391..564f336df9 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -18,7 +18,7 @@
 #include "exec/log.h"
 #include "cpu-csr.h"
 
-static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
+void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                                uint64_t *dir_width, target_ulong level)
 {
     switch (level) {
@@ -485,7 +485,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
 
     /* Data access */
     ret = get_physical_address(env, &physical, &prot, address,
-                               access_type, mmu_idx);
+                               access_type, mmu_idx, 0);
 
     if (ret == TLBRET_MATCH) {
         tlb_set_page(cs, address & TARGET_PAGE_MASK,
-- 
2.34.1
Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months, 2 weeks ago
Hi Miao,

Thanks for doing this. It is useful for debugging a VM.

On 2024/12/19 11:24 AM, Miao Hao wrote:
> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
> ---
>   target/loongarch/cpu_helper.c     | 104 ++++++++++++++++++++++++++++--
>   target/loongarch/internals.h      |   4 +-
>   target/loongarch/tcg/tlb_helper.c |   4 +-
>   3 files changed, 104 insertions(+), 8 deletions(-)
> 
> diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
> index 580362ac3e..c0828a813d 100644
> --- a/target/loongarch/cpu_helper.c
> +++ b/target/loongarch/cpu_helper.c
> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
>       return false;
>   }
>   
> +static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
> +                                 int *prot, target_ulong address)
> +{
> +    CPUState *cs = env_cpu(env);
> +    target_ulong index, phys;
> +    int shift;
> +    uint64_t dir_base, dir_width;
> +    uint64_t base;
> +    int level;
> +
> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
> +    shift = (shift + 1) * 3;
> +
> +    if ((address >> 63) & 0x1) {
> +        base = env->CSR_PGDH;
> +    } else {
> +        base = env->CSR_PGDL;
> +    }
> +    base &= TARGET_PHYS_MASK;
> +
> +    for (level = 4; level > 0; level--) {
> +        get_dir_base_width(env, &dir_base, &dir_width, level);
> +
> +        if (dir_width != 0) {
How about checking whether it equals 0 first, like this?
            if (dir_width == 0)
                continue;

> +            /* get next level page directory */
> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
> +            phys = base | index << shift;
This will only load the first 64 bits if shift is not 0, such as 1:128bit,
2:192bit, 3:256bit.

> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
> +                /* mask off page dir permission bits */
> +                base &= TARGET_PAGE_MASK;
> +            } else {
> +                /* base is a huge pte */
> +                break;
> +            }
> +
> +            if (base == 0) {
Physical address 0 is valid, and the Valid bit will be checked later.
Can we remove this?
> +                return TLBRET_NOMATCH;
> +            }

> +        }
> +    }
> +
> +    /* pte */
> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
> +        /* Huge Page. base is pte */
> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
> +        }
> +    } else {
> +        /* Normal Page. base points to pte */
> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
> +        phys = base | index << shift;
Ditto, the entry may be wider than 64 bits.

Regards
Bibo Mao
> +        base = ldq_phys(cs->as, phys);
> +    }
> +
> +    /* TODO: check plv and other bits? */
> +
> +    /* base is pte, in normal pte format */
> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
> +        return TLBRET_NOMATCH;
> +    }
> +
> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
> +        *prot = PAGE_READ;
> +    } else {
> +        *prot = PAGE_READ | PAGE_WRITE;
> +    }
> +
> +    /* get TARGET_PAGE_SIZE aligned physical address */
> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
> +    /* mask RPLV, NX, NR bits */
> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
> +    /* mask other attribute bits */
> +    *physical = base & TARGET_PAGE_MASK;
> +
> +    return 0;
> +}
> +
>   static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>                                    int *prot, target_ulong address,
> -                                 MMUAccessType access_type, int mmu_idx)
> +                                 MMUAccessType access_type, int mmu_idx,
> +                                 int is_debug)
>   {
>       int index, match;
>   
> @@ -151,6 +237,13 @@ static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>       if (match) {
>           return loongarch_map_tlb_entry(env, physical, prot,
>                                          address, access_type, index, mmu_idx);
> +    } else if (is_debug) {
> +        /*
> +         * For debugger memory access, we want to do the map when there is a
> +         * legal mapping, even if the mapping is not yet in TLB. return 0 if
> +         * there is a valid map, else none zero.
> +         */
> +        return loongarch_page_table_walker(env, physical, prot, address);
>       }
>   
>       return TLBRET_NOMATCH;
> @@ -158,7 +251,8 @@ static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>   #else
>   static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>                                    int *prot, target_ulong address,
> -                                 MMUAccessType access_type, int mmu_idx)
> +                                 MMUAccessType access_type, int mmu_idx,
> +                                 int is_debug)
>   {
>       return TLBRET_NOMATCH;
>   }
> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
>   
>   int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>                            int *prot, target_ulong address,
> -                         MMUAccessType access_type, int mmu_idx)
> +                         MMUAccessType access_type, int mmu_idx, int is_debug)
>   {
>       int user_mode = mmu_idx == MMU_USER_IDX;
>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>   
>       /* Mapped address */
>       return loongarch_map_address(env, physical, prot, address,
> -                                 access_type, mmu_idx);
> +                                 access_type, mmu_idx, is_debug);
>   }
>   
>   hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
> @@ -232,7 +326,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>       int prot;
>   
>       if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
> -                             cpu_mmu_index(cs, false)) != 0) {
> +                             cpu_mmu_index(cs, false), 1) != 0) {
>           return -1;
>       }
>       return phys_addr;
> diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
> index 1a02427627..bc2ca30746 100644
> --- a/target/loongarch/internals.h
> +++ b/target/loongarch/internals.h
> @@ -56,7 +56,9 @@ bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
>                             int *index);
>   int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>                            int *prot, target_ulong address,
> -                         MMUAccessType access_type, int mmu_idx);
> +                         MMUAccessType access_type, int mmu_idx, int is_debug);
> +void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
> +                               uint64_t *dir_width, target_ulong level);
>   hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
>   
>   #ifdef CONFIG_TCG
> diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
> index 97f38fc391..564f336df9 100644
> --- a/target/loongarch/tcg/tlb_helper.c
> +++ b/target/loongarch/tcg/tlb_helper.c
> @@ -18,7 +18,7 @@
>   #include "exec/log.h"
>   #include "cpu-csr.h"
>   
> -static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
> +void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
>                                  uint64_t *dir_width, target_ulong level)
>   {
>       switch (level) {
> @@ -485,7 +485,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>   
>       /* Data access */
>       ret = get_physical_address(env, &physical, &prot, address,
> -                               access_type, mmu_idx);
> +                               access_type, mmu_idx, 0);
>   
>       if (ret == TLBRET_MATCH) {
>           tlb_set_page(cs, address & TARGET_PAGE_MASK,
> 


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 3 months, 1 week ago
Hi Bibo,

Thanks for your review. I apologize for my late response due to some
personal reasons.

On 2024/12/19 17:57, bibo mao wrote:
> Hi Miao,
>
> Thanks for doing this. It is useful to debug VM.
>
> On 2024/12/19 上午11:24, Miao Hao wrote:
>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>> ---
>>   target/loongarch/cpu_helper.c     | 104 ++++++++++++++++++++++++++++--
>>   target/loongarch/internals.h      |   4 +-
>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>
>> diff --git a/target/loongarch/cpu_helper.c 
>> b/target/loongarch/cpu_helper.c
>> index 580362ac3e..c0828a813d 100644
>> --- a/target/loongarch/cpu_helper.c
>> +++ b/target/loongarch/cpu_helper.c
>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>> *env, target_ulong vaddr,
>>       return false;
>>   }
>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>> hwaddr *physical,
>> +                                 int *prot, target_ulong address)
>> +{
>> +    CPUState *cs = env_cpu(env);
>> +    target_ulong index, phys;
>> +    int shift;
>> +    uint64_t dir_base, dir_width;
>> +    uint64_t base;
>> +    int level;
>> +
>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>> +    shift = (shift + 1) * 3;

The assignment of the variable shift and the corresponding comment are
incorrect here; the details are recorded in the v1.03 change log of the
LoongArch specification, volume 1
(https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf)

     /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
     shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
     shift = shift + 3;
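
For clarity (an illustrative note, not part of the posted patch): with the
corrected formula, shift is simply log2 of the page table entry size in
bytes, so "index << shift" is the byte offset of entry number index within
the table:

     PTEWIDTH = 0 -> shift = 3 ->  8-byte (64-bit)  entries
     PTEWIDTH = 1 -> shift = 4 -> 16-byte (128-bit) entries
     PTEWIDTH = 2 -> shift = 5 -> 32-byte (256-bit) entries
     PTEWIDTH = 3 -> shift = 6 -> 64-byte (512-bit) entries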

>> +
>> +    if ((address >> 63) & 0x1) {
>> +        base = env->CSR_PGDH;
>> +    } else {
>> +        base = env->CSR_PGDL;
>> +    }
>> +    base &= TARGET_PHYS_MASK;
>> +
>> +    for (level = 4; level > 0; level--) {
>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>> +
>> +        if (dir_width != 0) {
> how about check whether it equeal to 0 firstly like this?
>            if (dir_width == 0)
>                continue;
>
It's good to reduce code nesting; I will adopt this suggestion.
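
A minimal sketch of the loop with that change applied (based on the code in
this patch as posted, untested and for illustration only):

    for (level = 4; level > 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);
        if (dir_width == 0) {
            continue;
        }

        /* get next level page directory */
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << shift;
        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
            /* base is a huge pte */
            break;
        }
        /* mask off page dir permission bits */
        base &= TARGET_PAGE_MASK;

        if (base == 0) {
            return TLBRET_NOMATCH;
        }
    }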
>> +            /* get next level page directory */
>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>> +            phys = base | index << shift;
> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
> 2:192bit, 3:256bit
>
After fixing the assignment of shift, this issue no longer exists: shift
is less than or equal to 6, and index is 6 bits wide, so index << shift is
at most 12 bits.

>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>> +                /* mask off page dir permission bits */
>> +                base &= TARGET_PAGE_MASK;
>> +            } else {
>> +                /* base is a huge pte */
>> +                break;
>> +            }
>> +
>> +            if (base == 0) {
> physical adddress 0 is valid and Valid bit will be checked in later. 
> Can we remove this?
A base value of 0 means that the current page directory entry does not
point to a next-level page directory, so we return here.
>> +                return TLBRET_NOMATCH;
>> +            }
>
>> +        }
>> +    }
>> +
>> +    /* pte */
>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>> +        /* Huge Page. base is pte */
>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>> +        }
>> +    } else {
>> +        /* Normal Page. base points to pte */
>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>> +        phys = base | index << shift;
> Ditto, shift may be wider than 64-bit.
>
> Regards
> Bibo Mao

Ditto, shift is less than or equal to 6.


Regards

Miao Hao

>> +        base = ldq_phys(cs->as, phys);
>> +    }
>> +
>> +    /* TODO: check plv and other bits? */
>> +
>> +    /* base is pte, in normal pte format */
>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>> +        return TLBRET_NOMATCH;
>> +    }
>> +
>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>> +        *prot = PAGE_READ;
>> +    } else {
>> +        *prot = PAGE_READ | PAGE_WRITE;
>> +    }
>> +
>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>> +    /* mask RPLV, NX, NR bits */
>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>> +    /* mask other attribute bits */
>> +    *physical = base & TARGET_PAGE_MASK;
>> +
>> +    return 0;
>> +}
>> +
>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>> *physical,
>>                                    int *prot, target_ulong address,
>> -                                 MMUAccessType access_type, int 
>> mmu_idx)
>> +                                 MMUAccessType access_type, int 
>> mmu_idx,
>> +                                 int is_debug)
>>   {
>>       int index, match;
>>   @@ -151,6 +237,13 @@ static int 
>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>       if (match) {
>>           return loongarch_map_tlb_entry(env, physical, prot,
>>                                          address, access_type, index, 
>> mmu_idx);
>> +    } else if (is_debug) {
>> +        /*
>> +         * For debugger memory access, we want to do the map when 
>> there is a
>> +         * legal mapping, even if the mapping is not yet in TLB. 
>> return 0 if
>> +         * there is a valid map, else none zero.
>> +         */
>> +        return loongarch_page_table_walker(env, physical, prot, 
>> address);
>>       }
>>         return TLBRET_NOMATCH;
>> @@ -158,7 +251,8 @@ static int 
>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>   #else
>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>> *physical,
>>                                    int *prot, target_ulong address,
>> -                                 MMUAccessType access_type, int 
>> mmu_idx)
>> +                                 MMUAccessType access_type, int 
>> mmu_idx,
>> +                                 int is_debug)
>>   {
>>       return TLBRET_NOMATCH;
>>   }
>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>> target_ulong va,
>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>                            int *prot, target_ulong address,
>> -                         MMUAccessType access_type, int mmu_idx)
>> +                         MMUAccessType access_type, int mmu_idx, int 
>> is_debug)
>>   {
>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, 
>> hwaddr *physical,
>>         /* Mapped address */
>>       return loongarch_map_address(env, physical, prot, address,
>> -                                 access_type, mmu_idx);
>> +                                 access_type, mmu_idx, is_debug);
>>   }
>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>> @@ -232,7 +326,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState 
>> *cs, vaddr addr)
>>       int prot;
>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>> MMU_DATA_LOAD,
>> -                             cpu_mmu_index(cs, false)) != 0) {
>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>           return -1;
>>       }
>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months ago

On 2024/12/30 3:04 PM, Miao Hao wrote:
> Hi Bibo,
> 
> Thanks for your review. I apologize for my late respond due to some 
> personal reasons.
> 
> On 2024/12/19 17:57, bibo mao wrote:
>> Hi Miao,
>>
>> Thanks for doing this. It is useful to debug VM.
>>
>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>> ---
>>>   target/loongarch/cpu_helper.c     | 104 ++++++++++++++++++++++++++++--
>>>   target/loongarch/internals.h      |   4 +-
>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/target/loongarch/cpu_helper.c 
>>> b/target/loongarch/cpu_helper.c
>>> index 580362ac3e..c0828a813d 100644
>>> --- a/target/loongarch/cpu_helper.c
>>> +++ b/target/loongarch/cpu_helper.c
>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>> *env, target_ulong vaddr,
>>>       return false;
>>>   }
>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>> hwaddr *physical,
>>> +                                 int *prot, target_ulong address)
>>> +{
>>> +    CPUState *cs = env_cpu(env);
>>> +    target_ulong index, phys;
>>> +    int shift;
>>> +    uint64_t dir_base, dir_width;
>>> +    uint64_t base;
>>> +    int level;
>>> +
>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>> +    shift = (shift + 1) * 3;
> 
> the assignment of variable shift and the corresponding comment is 
> incorrect here, and details are logged in the v1.03 change log of 
> LoongArch specification volume1 
> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
> 
> 
>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>      shift = shift + 3;
> 
>>> +
>>> +    if ((address >> 63) & 0x1) {
>>> +        base = env->CSR_PGDH;
>>> +    } else {
>>> +        base = env->CSR_PGDL;
>>> +    }
>>> +    base &= TARGET_PHYS_MASK;
>>> +
>>> +    for (level = 4; level > 0; level--) {
>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>> +
>>> +        if (dir_width != 0) {
>> how about check whether it equeal to 0 firstly like this?
>>            if (dir_width == 0)
>>                continue;
>>
> It's good to reduce code nesting, I will adopt this suggestion.
>>> +            /* get next level page directory */
>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>> +            phys = base | index << shift;
>> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
>> 2:192bit, 3:256bit
>>
> After fixing the assignment of shift, this issue no longer exists. Shift 
> is less than or equal to 6, and index is 6 bit. Thus, index << shift is 
> at most 12 bit.
> 
>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>> +                /* mask off page dir permission bits */
>>> +                base &= TARGET_PAGE_MASK;
>>> +            } else {
>>> +                /* base is a huge pte */
>>> +                break;
>>> +            }
>>> +
>>> +            if (base == 0) {
>> physical adddress 0 is valid and Valid bit will be checked in later. 
>> Can we remove this?
> the value of base equals to 0 means that the current page directory 
> entry does not point to next level page directory, so we return here
There is no documentation about a page directory entry with value 0; am I
missing something?

In theory physical address 0 is valid, so a page at physical address 0 can
be referenced by a page directory entry.

Regards
Bibo Mao

>>> +                return TLBRET_NOMATCH;
>>> +            }
>>
>>> +        }
>>> +    }
>>> +
>>> +    /* pte */
>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>> +        /* Huge Page. base is pte */
>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>> +        }
>>> +    } else {
>>> +        /* Normal Page. base points to pte */
>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>> +        phys = base | index << shift;
>> Ditto, shift may be wider than 64-bit.
>>
>> Regards
>> Bibo Mao
> 
> Ditto, shift is less than or equal to 6.
> 
> 
> Regards
> 
> Miao Hao
> 
>>> +        base = ldq_phys(cs->as, phys);
>>> +    }
>>> +
>>> +    /* TODO: check plv and other bits? */
>>> +
>>> +    /* base is pte, in normal pte format */
>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>> +        return TLBRET_NOMATCH;
>>> +    }
>>> +
>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>> +        *prot = PAGE_READ;
>>> +    } else {
>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>> +    }
>>> +
>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>> +    /* mask RPLV, NX, NR bits */
>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>> +    /* mask other attribute bits */
>>> +    *physical = base & TARGET_PAGE_MASK;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>> *physical,
>>>                                    int *prot, target_ulong address,
>>> -                                 MMUAccessType access_type, int 
>>> mmu_idx)
>>> +                                 MMUAccessType access_type, int 
>>> mmu_idx,
>>> +                                 int is_debug)
>>>   {
>>>       int index, match;
>>>   @@ -151,6 +237,13 @@ static int 
>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>       if (match) {
>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>                                          address, access_type, index, 
>>> mmu_idx);
>>> +    } else if (is_debug) {
>>> +        /*
>>> +         * For debugger memory access, we want to do the map when 
>>> there is a
>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>> return 0 if
>>> +         * there is a valid map, else none zero.
>>> +         */
>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>> address);
>>>       }
>>>         return TLBRET_NOMATCH;
>>> @@ -158,7 +251,8 @@ static int 
>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>   #else
>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>> *physical,
>>>                                    int *prot, target_ulong address,
>>> -                                 MMUAccessType access_type, int 
>>> mmu_idx)
>>> +                                 MMUAccessType access_type, int 
>>> mmu_idx,
>>> +                                 int is_debug)
>>>   {
>>>       return TLBRET_NOMATCH;
>>>   }
>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>>> target_ulong va,
>>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>>                            int *prot, target_ulong address,
>>> -                         MMUAccessType access_type, int mmu_idx)
>>> +                         MMUAccessType access_type, int mmu_idx, int 
>>> is_debug)
>>>   {
>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, 
>>> hwaddr *physical,
>>>         /* Mapped address */
>>>       return loongarch_map_address(env, physical, prot, address,
>>> -                                 access_type, mmu_idx);
>>> +                                 access_type, mmu_idx, is_debug);
>>>   }
>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>> @@ -232,7 +326,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState 
>>> *cs, vaddr addr)
>>>       int prot;
>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>> MMU_DATA_LOAD,
>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>           return -1;
>>>       }
>>>       return phys_addr;
> 


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 3 months ago
On 2024/12/31 19:29, bibo mao wrote:
>
>
> On 2024/12/30 下午3:04, Miao Hao wrote:
>> Hi Bibo,
>>
>> Thanks for your review. I apologize for my late respond due to some 
>> personal reasons.
>>
>> On 2024/12/19 17:57, bibo mao wrote:
>>> Hi Miao,
>>>
>>> Thanks for doing this. It is useful to debug VM.
>>>
>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>> ---
>>>>   target/loongarch/cpu_helper.c     | 104 
>>>> ++++++++++++++++++++++++++++--
>>>>   target/loongarch/internals.h      |   4 +-
>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>> b/target/loongarch/cpu_helper.c
>>>> index 580362ac3e..c0828a813d 100644
>>>> --- a/target/loongarch/cpu_helper.c
>>>> +++ b/target/loongarch/cpu_helper.c
>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>> *env, target_ulong vaddr,
>>>>       return false;
>>>>   }
>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>> hwaddr *physical,
>>>> +                                 int *prot, target_ulong address)
>>>> +{
>>>> +    CPUState *cs = env_cpu(env);
>>>> +    target_ulong index, phys;
>>>> +    int shift;
>>>> +    uint64_t dir_base, dir_width;
>>>> +    uint64_t base;
>>>> +    int level;
>>>> +
>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>> +    shift = (shift + 1) * 3;
>>
>> the assignment of variable shift and the corresponding comment is 
>> incorrect here, and details are logged in the v1.03 change log of 
>> LoongArch specification volume1 
>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>
>>
>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>      shift = shift + 3;
>>
>>>> +
>>>> +    if ((address >> 63) & 0x1) {
>>>> +        base = env->CSR_PGDH;
>>>> +    } else {
>>>> +        base = env->CSR_PGDL;
>>>> +    }
>>>> +    base &= TARGET_PHYS_MASK;
>>>> +
>>>> +    for (level = 4; level > 0; level--) {
>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>> +
>>>> +        if (dir_width != 0) {
>>> how about check whether it equeal to 0 firstly like this?
>>>            if (dir_width == 0)
>>>                continue;
>>>
>> It's good to reduce code nesting, I will adopt this suggestion.
>>>> +            /* get next level page directory */
>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>> +            phys = base | index << shift;
>>> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
>>> 2:192bit, 3:256bit
>>>
>> After fixing the assignment of shift, this issue no longer exists. 
>> Shift is less than or equal to 6, and index is 6 bit. Thus, index << 
>> shift is at most 12 bit.
>>
>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>> +                /* mask off page dir permission bits */
>>>> +                base &= TARGET_PAGE_MASK;
>>>> +            } else {
>>>> +                /* base is a huge pte */
>>>> +                break;
>>>> +            }
>>>> +
>>>> +            if (base == 0) {
>>> physical adddress 0 is valid and Valid bit will be checked in later. 
>>> Can we remove this?
>> the value of base equals to 0 means that the current page directory 
>> entry does not point to next level page directory, so we return here
> There is no document about page directory entry with value 0, do I 
> miss something?
>
> In theory physical address 0 is valid, page with physical address 0 
> can be set as page directory entry.
>
> Regards
> Bibo Mao
>
OK, it seems that entries in the page directory table do not have
attribute bits, so we can remove this.


Regards

Miao Hao

>>>> +                return TLBRET_NOMATCH;
>>>> +            }
>>>
>>>> +        }
>>>> +    }
>>>> +
>>>> +    /* pte */
>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>> +        /* Huge Page. base is pte */
>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>> +        }
>>>> +    } else {
>>>> +        /* Normal Page. base points to pte */
>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>> +        phys = base | index << shift;
>>> Ditto, shift may be wider than 64-bit.
>>>
>>> Regards
>>> Bibo Mao
>>
>> Ditto, shift is less than or equal to 6.
>>
>>
>> Regards
>>
>> Miao Hao
>>
>>>> +        base = ldq_phys(cs->as, phys);
>>>> +    }
>>>> +
>>>> +    /* TODO: check plv and other bits? */
>>>> +
>>>> +    /* base is pte, in normal pte format */
>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>> +        return TLBRET_NOMATCH;
>>>> +    }
>>>> +
>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>> +        *prot = PAGE_READ;
>>>> +    } else {
>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>> +    }
>>>> +
>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>> +    /* mask RPLV, NX, NR bits */
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>> +    /* mask other attribute bits */
>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>> +
>>>> +    return 0;
>>>> +}
>>>> +
>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>> *physical,
>>>>                                    int *prot, target_ulong address,
>>>> -                                 MMUAccessType access_type, int 
>>>> mmu_idx)
>>>> +                                 MMUAccessType access_type, int 
>>>> mmu_idx,
>>>> +                                 int is_debug)
>>>>   {
>>>>       int index, match;
>>>>   @@ -151,6 +237,13 @@ static int 
>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>       if (match) {
>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>                                          address, access_type, 
>>>> index, mmu_idx);
>>>> +    } else if (is_debug) {
>>>> +        /*
>>>> +         * For debugger memory access, we want to do the map when 
>>>> there is a
>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>> return 0 if
>>>> +         * there is a valid map, else none zero.
>>>> +         */
>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>> address);
>>>>       }
>>>>         return TLBRET_NOMATCH;
>>>> @@ -158,7 +251,8 @@ static int 
>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>   #else
>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>> *physical,
>>>>                                    int *prot, target_ulong address,
>>>> -                                 MMUAccessType access_type, int 
>>>> mmu_idx)
>>>> +                                 MMUAccessType access_type, int 
>>>> mmu_idx,
>>>> +                                 int is_debug)
>>>>   {
>>>>       return TLBRET_NOMATCH;
>>>>   }
>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>>>> target_ulong va,
>>>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>>>                            int *prot, target_ulong address,
>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>> int is_debug)
>>>>   {
>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>> *env, hwaddr *physical,
>>>>         /* Mapped address */
>>>>       return loongarch_map_address(env, physical, prot, address,
>>>> -                                 access_type, mmu_idx);
>>>> +                                 access_type, mmu_idx, is_debug);
>>>>   }
>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>       int prot;
>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>> MMU_DATA_LOAD,
>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>           return -1;
>>>>       }
>>>>       return phys_addr;
>>


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 2 months, 3 weeks ago
Miao,

What is the status of this patch? Will there be an updated version?

Regards
Bibo Mao

On 2025/1/2 2:33 PM, Miao Hao wrote:
> 
> On 2024/12/31 19:29, bibo mao wrote:
>>
>>
>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>> Hi Bibo,
>>>
>>> Thanks for your review. I apologize for my late respond due to some 
>>> personal reasons.
>>>
>>> On 2024/12/19 17:57, bibo mao wrote:
>>>> Hi Miao,
>>>>
>>>> Thanks for doing this. It is useful to debug VM.
>>>>
>>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>> ---
>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>> ++++++++++++++++++++++++++++--
>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>
>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>> b/target/loongarch/cpu_helper.c
>>>>> index 580362ac3e..c0828a813d 100644
>>>>> --- a/target/loongarch/cpu_helper.c
>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>> *env, target_ulong vaddr,
>>>>>       return false;
>>>>>   }
>>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>>> hwaddr *physical,
>>>>> +                                 int *prot, target_ulong address)
>>>>> +{
>>>>> +    CPUState *cs = env_cpu(env);
>>>>> +    target_ulong index, phys;
>>>>> +    int shift;
>>>>> +    uint64_t dir_base, dir_width;
>>>>> +    uint64_t base;
>>>>> +    int level;
>>>>> +
>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>> +    shift = (shift + 1) * 3;
>>>
>>> the assignment of variable shift and the corresponding comment is 
>>> incorrect here, and details are logged in the v1.03 change log of 
>>> LoongArch specification volume1 
>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>
>>>
>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>      shift = shift + 3;
>>>
>>>>> +
>>>>> +    if ((address >> 63) & 0x1) {
>>>>> +        base = env->CSR_PGDH;
>>>>> +    } else {
>>>>> +        base = env->CSR_PGDL;
>>>>> +    }
>>>>> +    base &= TARGET_PHYS_MASK;
>>>>> +
>>>>> +    for (level = 4; level > 0; level--) {
>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>> +
>>>>> +        if (dir_width != 0) {
>>>> how about check whether it equeal to 0 firstly like this?
>>>>            if (dir_width == 0)
>>>>                continue;
>>>>
>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>> +            /* get next level page directory */
>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>> +            phys = base | index << shift;
>>>> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
>>>> 2:192bit, 3:256bit
>>>>
>>> After fixing the assignment of shift, this issue no longer exists. 
>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index << 
>>> shift is at most 12 bit.
>>>
>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>> +                /* mask off page dir permission bits */
>>>>> +                base &= TARGET_PAGE_MASK;
>>>>> +            } else {
>>>>> +                /* base is a huge pte */
>>>>> +                break;
>>>>> +            }
>>>>> +
>>>>> +            if (base == 0) {
>>>> physical adddress 0 is valid and Valid bit will be checked in later. 
>>>> Can we remove this?
>>> the value of base equals to 0 means that the current page directory 
>>> entry does not point to next level page directory, so we return here
>> There is no document about page directory entry with value 0, do I 
>> miss something?
>>
>> In theory physical address 0 is valid, page with physical address 0 
>> can be set as page directory entry.
>>
>> Regards
>> Bibo Mao
>>
> OK, it seems that entries in page directory table does not have 
> attribute bits, so we can remove this.
> 
> 
> Regards
> 
> Miao Hao
> 
>>>>> +                return TLBRET_NOMATCH;
>>>>> +            }
>>>>
>>>>> +        }
>>>>> +    }
>>>>> +
>>>>> +    /* pte */
>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>> +        /* Huge Page. base is pte */
>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>> +        }
>>>>> +    } else {
>>>>> +        /* Normal Page. base points to pte */
>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>> +        phys = base | index << shift;
>>>> Ditto, shift may be wider than 64-bit.
>>>>
>>>> Regards
>>>> Bibo Mao
>>>
>>> Ditto, shift is less than or equal to 6.
>>>
>>>
>>> Regards
>>>
>>> Miao Hao
>>>
>>>>> +        base = ldq_phys(cs->as, phys);
>>>>> +    }
>>>>> +
>>>>> +    /* TODO: check plv and other bits? */
>>>>> +
>>>>> +    /* base is pte, in normal pte format */
>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>> +        return TLBRET_NOMATCH;
>>>>> +    }
>>>>> +
>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>> +        *prot = PAGE_READ;
>>>>> +    } else {
>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>> +    }
>>>>> +
>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>> +    /* mask RPLV, NX, NR bits */
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>> +    /* mask other attribute bits */
>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>> +
>>>>> +    return 0;
>>>>> +}
>>>>> +
>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>> *physical,
>>>>>                                    int *prot, target_ulong address,
>>>>> -                                 MMUAccessType access_type, int 
>>>>> mmu_idx)
>>>>> +                                 MMUAccessType access_type, int 
>>>>> mmu_idx,
>>>>> +                                 int is_debug)
>>>>>   {
>>>>>       int index, match;
>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>       if (match) {
>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>                                          address, access_type, 
>>>>> index, mmu_idx);
>>>>> +    } else if (is_debug) {
>>>>> +        /*
>>>>> +         * For debugger memory access, we want to do the map when 
>>>>> there is a
>>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>>> return 0 if
>>>>> +         * there is a valid map, else none zero.
>>>>> +         */
>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>> address);
>>>>>       }
>>>>>         return TLBRET_NOMATCH;
>>>>> @@ -158,7 +251,8 @@ static int 
>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>   #else
>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>> *physical,
>>>>>                                    int *prot, target_ulong address,
>>>>> -                                 MMUAccessType access_type, int 
>>>>> mmu_idx)
>>>>> +                                 MMUAccessType access_type, int 
>>>>> mmu_idx,
>>>>> +                                 int is_debug)
>>>>>   {
>>>>>       return TLBRET_NOMATCH;
>>>>>   }
>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>>>>> target_ulong va,
>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>>>>                            int *prot, target_ulong address,
>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>>> int is_debug)
>>>>>   {
>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>> *env, hwaddr *physical,
>>>>>         /* Mapped address */
>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>> -                                 access_type, mmu_idx);
>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>   }
>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>       int prot;
>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>> MMU_DATA_LOAD,
>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>           return -1;
>>>>>       }
>>>>>       return phys_addr;
>>>


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 2 months, 3 weeks ago
On 2025/1/14 17:00, bibo mao wrote:
> Miao,
>
> What is status about this patch? Will there be updated version?
>
> Regards
> Bibo Mao

Sorry, I was waiting for your reply. I have just updated the patch to
version 3.


Regards

Miao Hao

>
> On 2025/1/2 下午2:33, Miao Hao wrote:
>>
>> On 2024/12/31 19:29, bibo mao wrote:
>>>
>>>
>>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>>> Hi Bibo,
>>>>
>>>> Thanks for your review. I apologize for my late respond due to some 
>>>> personal reasons.
>>>>
>>>> On 2024/12/19 17:57, bibo mao wrote:
>>>>> Hi Miao,
>>>>>
>>>>> Thanks for doing this. It is useful to debug VM.
>>>>>
>>>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>>> ---
>>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>>> ++++++++++++++++++++++++++++--
>>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>>
>>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>>> b/target/loongarch/cpu_helper.c
>>>>>> index 580362ac3e..c0828a813d 100644
>>>>>> --- a/target/loongarch/cpu_helper.c
>>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>>> *env, target_ulong vaddr,
>>>>>>       return false;
>>>>>>   }
>>>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>>>> hwaddr *physical,
>>>>>> +                                 int *prot, target_ulong address)
>>>>>> +{
>>>>>> +    CPUState *cs = env_cpu(env);
>>>>>> +    target_ulong index, phys;
>>>>>> +    int shift;
>>>>>> +    uint64_t dir_base, dir_width;
>>>>>> +    uint64_t base;
>>>>>> +    int level;
>>>>>> +
>>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>> +    shift = (shift + 1) * 3;
>>>>
>>>> the assignment of variable shift and the corresponding comment is 
>>>> incorrect here, and details are logged in the v1.03 change log of 
>>>> LoongArch specification volume1 
>>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>>
>>>>
>>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>      shift = shift + 3;
>>>>
>>>>>> +
>>>>>> +    if ((address >> 63) & 0x1) {
>>>>>> +        base = env->CSR_PGDH;
>>>>>> +    } else {
>>>>>> +        base = env->CSR_PGDL;
>>>>>> +    }
>>>>>> +    base &= TARGET_PHYS_MASK;
>>>>>> +
>>>>>> +    for (level = 4; level > 0; level--) {
>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>>> +
>>>>>> +        if (dir_width != 0) {
>>>>> how about check whether it equeal to 0 firstly like this?
>>>>>            if (dir_width == 0)
>>>>>                continue;
>>>>>
>>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>>> +            /* get next level page directory */
>>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>> +            phys = base | index << shift;
>>>>> Here will only load first 64bit if shift is not 0, such as 
>>>>> 1:128bit, 2:192bit, 3:256bit
>>>>>
>>>> After fixing the assignment of shift, this issue no longer exists. 
>>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index 
>>>> << shift is at most 12 bit.
>>>>
>>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>> +                /* mask off page dir permission bits */
>>>>>> +                base &= TARGET_PAGE_MASK;
>>>>>> +            } else {
>>>>>> +                /* base is a huge pte */
>>>>>> +                break;
>>>>>> +            }
>>>>>> +
>>>>>> +            if (base == 0) {
>>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>>> later. Can we remove this?
>>>> the value of base equals to 0 means that the current page directory 
>>>> entry does not point to next level page directory, so we return here
>>> There is no document about page directory entry with value 0, do I 
>>> miss something?
>>>
>>> In theory physical address 0 is valid, page with physical address 0 
>>> can be set as page directory entry.
>>>
>>> Regards
>>> Bibo Mao
>>>
>> OK, it seems that entries in page directory table does not have 
>> attribute bits, so we can remove this.
>>
>>
>> Regards
>>
>> Miao Hao
>>
>>>>>> +                return TLBRET_NOMATCH;
>>>>>> +            }
>>>>>
>>>>>> +        }
>>>>>> +    }
>>>>>> +
>>>>>> +    /* pte */
>>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>> +        /* Huge Page. base is pte */
>>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>>> +        }
>>>>>> +    } else {
>>>>>> +        /* Normal Page. base points to pte */
>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>> +        phys = base | index << shift;
>>>>> Ditto, shift may be wider than 64-bit.
>>>>>
>>>>> Regards
>>>>> Bibo Mao
>>>>
>>>> Ditto, shift is less than or equal to 6.
>>>>
>>>>
>>>> Regards
>>>>
>>>> Miao Hao
>>>>
>>>>>> +        base = ldq_phys(cs->as, phys);
>>>>>> +    }
>>>>>> +
>>>>>> +    /* TODO: check plv and other bits? */
>>>>>> +
>>>>>> +    /* base is pte, in normal pte format */
>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>>> +        return TLBRET_NOMATCH;
>>>>>> +    }
>>>>>> +
>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>>> +        *prot = PAGE_READ;
>>>>>> +    } else {
>>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>>> +    }
>>>>>> +
>>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>>> +    /* mask RPLV, NX, NR bits */
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>>> +    /* mask other attribute bits */
>>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>>> +
>>>>>> +    return 0;
>>>>>> +}
>>>>>> +
>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                                    int *prot, target_ulong address,
>>>>>> -                                 MMUAccessType access_type, int 
>>>>>> mmu_idx)
>>>>>> +                                 MMUAccessType access_type, int 
>>>>>> mmu_idx,
>>>>>> +                                 int is_debug)
>>>>>>   {
>>>>>>       int index, match;
>>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>       if (match) {
>>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>>                                          address, access_type, 
>>>>>> index, mmu_idx);
>>>>>> +    } else if (is_debug) {
>>>>>> +        /*
>>>>>> +         * For debugger memory access, we want to do the map 
>>>>>> when there is a
>>>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>>>> return 0 if
>>>>>> +         * there is a valid map, else none zero.
>>>>>> +         */
>>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>>> address);
>>>>>>       }
>>>>>>         return TLBRET_NOMATCH;
>>>>>> @@ -158,7 +251,8 @@ static int 
>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>   #else
>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                                    int *prot, target_ulong address,
>>>>>> -                                 MMUAccessType access_type, int 
>>>>>> mmu_idx)
>>>>>> +                                 MMUAccessType access_type, int 
>>>>>> mmu_idx,
>>>>>> +                                 int is_debug)
>>>>>>   {
>>>>>>       return TLBRET_NOMATCH;
>>>>>>   }
>>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>>> *env, target_ulong va,
>>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                            int *prot, target_ulong address,
>>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>>>> int is_debug)
>>>>>>   {
>>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>>> *env, hwaddr *physical,
>>>>>>         /* Mapped address */
>>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>>> -                                 access_type, mmu_idx);
>>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>>   }
>>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>>> addr)
>>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>>       int prot;
>>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>>> MMU_DATA_LOAD,
>>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>>           return -1;
>>>>>>       }
>>>>>>       return phys_addr;
>>>>


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 2 months, 3 weeks ago

On 2025/1/14 下午5:55, Miao Hao wrote:
> 
> On 2025/1/14 17:00, bibo mao wrote:
>> Miao,
>>
>> What is status about this patch? Will there be updated version?
>>
>> Regards
>> Bibo Mao
> 
> Sorry, I'm waiting for your reply. I have just updated the patch for 
> version 3.
That is fine :)

PTE width is only 64-bit now; 128-bit and the other widths have been removed. 
You can rebase it on the latest version.

> 
> 
> Regards
> 
> Miao Hao
> 
>>
>> On 2025/1/2 下午2:33, Miao Hao wrote:
>>>
>>> On 2024/12/31 19:29, bibo mao wrote:
>>>>
>>>>
>>>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>>>> Hi Bibo,
>>>>>
>>>>> Thanks for your review. I apologize for my late respond due to some 
>>>>> personal reasons.
>>>>>
>>>>> On 2024/12/19 17:57, bibo mao wrote:
>>>>>> Hi Miao,
>>>>>>
>>>>>> Thanks for doing this. It is useful to debug VM.
>>>>>>
>>>>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>>>> ---
>>>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>>>> ++++++++++++++++++++++++++++--
>>>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>>>
>>>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>>>> b/target/loongarch/cpu_helper.c
>>>>>>> index 580362ac3e..c0828a813d 100644
>>>>>>> --- a/target/loongarch/cpu_helper.c
>>>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>>>> *env, target_ulong vaddr,
>>>>>>>       return false;
>>>>>>>   }
>>>>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>>>>> hwaddr *physical,
>>>>>>> +                                 int *prot, target_ulong address)
>>>>>>> +{
>>>>>>> +    CPUState *cs = env_cpu(env);
>>>>>>> +    target_ulong index, phys;
>>>>>>> +    int shift;
>>>>>>> +    uint64_t dir_base, dir_width;
>>>>>>> +    uint64_t base;
>>>>>>> +    int level;
>>>>>>> +
>>>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>>> +    shift = (shift + 1) * 3;
>>>>>
>>>>> the assignment of variable shift and the corresponding comment is 
>>>>> incorrect here, and details are logged in the v1.03 change log of 
>>>>> LoongArch specification volume1 
>>>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>>>
>>>>>
>>>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>      shift = shift + 3;
>>>>>
>>>>>>> +
>>>>>>> +    if ((address >> 63) & 0x1) {
>>>>>>> +        base = env->CSR_PGDH;
>>>>>>> +    } else {
>>>>>>> +        base = env->CSR_PGDL;
>>>>>>> +    }
>>>>>>> +    base &= TARGET_PHYS_MASK;
>>>>>>> +
>>>>>>> +    for (level = 4; level > 0; level--) {
>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>>>> +
>>>>>>> +        if (dir_width != 0) {
>>>>>> how about check whether it equeal to 0 firstly like this?
>>>>>>            if (dir_width == 0)
>>>>>>                continue;
>>>>>>
>>>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>>>> +            /* get next level page directory */
>>>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>>> +            phys = base | index << shift;
>>>>>> Here will only load first 64bit if shift is not 0, such as 
>>>>>> 1:128bit, 2:192bit, 3:256bit
>>>>>>
>>>>> After fixing the assignment of shift, this issue no longer exists. 
>>>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index 
>>>>> << shift is at most 12 bit.
>>>>>
>>>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>> +                /* mask off page dir permission bits */
>>>>>>> +                base &= TARGET_PAGE_MASK;
>>>>>>> +            } else {
>>>>>>> +                /* base is a huge pte */
>>>>>>> +                break;
>>>>>>> +            }
>>>>>>> +
>>>>>>> +            if (base == 0) {
>>>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>>>> later. Can we remove this?
>>>>> the value of base equals to 0 means that the current page directory 
>>>>> entry does not point to next level page directory, so we return here
>>>> There is no document about page directory entry with value 0, do I 
>>>> miss something?
>>>>
>>>> In theory physical address 0 is valid, page with physical address 0 
>>>> can be set as page directory entry.
>>>>
>>>> Regards
>>>> Bibo Mao
>>>>
>>> OK, it seems that entries in page directory table does not have 
>>> attribute bits, so we can remove this.
>>>
>>>
>>> Regards
>>>
>>> Miao Hao
>>>
>>>>>>> +                return TLBRET_NOMATCH;
>>>>>>> +            }
>>>>>>
>>>>>>> +        }
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* pte */
>>>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>> +        /* Huge Page. base is pte */
>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>>>> +        }
>>>>>>> +    } else {
>>>>>>> +        /* Normal Page. base points to pte */
>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>>> +        phys = base | index << shift;
>>>>>> Ditto, shift may be wider than 64-bit.
>>>>>>
>>>>>> Regards
>>>>>> Bibo Mao
>>>>>
>>>>> Ditto, shift is less than or equal to 6.
>>>>>
>>>>>
>>>>> Regards
>>>>>
>>>>> Miao Hao
>>>>>
>>>>>>> +        base = ldq_phys(cs->as, phys);
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* TODO: check plv and other bits? */
>>>>>>> +
>>>>>>> +    /* base is pte, in normal pte format */
>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>>>> +        return TLBRET_NOMATCH;
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>>>> +        *prot = PAGE_READ;
>>>>>>> +    } else {
>>>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>>>> +    /* mask RPLV, NX, NR bits */
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>>>> +    /* mask other attribute bits */
>>>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>>>> +
>>>>>>> +    return 0;
>>>>>>> +}
>>>>>>> +
>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>>> *physical,
>>>>>>>                                    int *prot, target_ulong address,
>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx)
>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx,
>>>>>>> +                                 int is_debug)
>>>>>>>   {
>>>>>>>       int index, match;
>>>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>       if (match) {
>>>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>>>                                          address, access_type, 
>>>>>>> index, mmu_idx);
>>>>>>> +    } else if (is_debug) {
>>>>>>> +        /*
>>>>>>> +         * For debugger memory access, we want to do the map 
>>>>>>> when there is a
>>>>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>>>>> return 0 if
>>>>>>> +         * there is a valid map, else none zero.
>>>>>>> +         */
>>>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>>>> address);
>>>>>>>       }
>>>>>>>         return TLBRET_NOMATCH;
>>>>>>> @@ -158,7 +251,8 @@ static int 
>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>   #else
>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>>> *physical,
>>>>>>>                                    int *prot, target_ulong address,
>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx)
>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx,
>>>>>>> +                                 int is_debug)
>>>>>>>   {
>>>>>>>       return TLBRET_NOMATCH;
>>>>>>>   }
>>>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>>>> *env, target_ulong va,
>>>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>>>> *physical,
>>>>>>>                            int *prot, target_ulong address,
>>>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>>>>> int is_debug)
>>>>>>>   {
>>>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>>>> *env, hwaddr *physical,
>>>>>>>         /* Mapped address */
>>>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>>>> -                                 access_type, mmu_idx);
>>>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>>>   }
>>>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>>>> addr)
>>>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>>>       int prot;
>>>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>>>> MMU_DATA_LOAD,
>>>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>>>           return -1;
>>>>>>>       }
>>>>>>>       return phys_addr;
>>>>>
> 


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months ago

On 2024/12/30 下午3:04, Miao Hao wrote:
> Hi Bibo,
> 
> Thanks for your review. I apologize for my late respond due to some 
> personal reasons.
> 
> On 2024/12/19 17:57, bibo mao wrote:
>> Hi Miao,
>>
>> Thanks for doing this. It is useful to debug VM.
>>
>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>> ---
>>>   target/loongarch/cpu_helper.c     | 104 ++++++++++++++++++++++++++++--
>>>   target/loongarch/internals.h      |   4 +-
>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/target/loongarch/cpu_helper.c 
>>> b/target/loongarch/cpu_helper.c
>>> index 580362ac3e..c0828a813d 100644
>>> --- a/target/loongarch/cpu_helper.c
>>> +++ b/target/loongarch/cpu_helper.c
>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>> *env, target_ulong vaddr,
>>>       return false;
>>>   }
>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>> hwaddr *physical,
>>> +                                 int *prot, target_ulong address)
>>> +{
>>> +    CPUState *cs = env_cpu(env);
>>> +    target_ulong index, phys;
>>> +    int shift;
>>> +    uint64_t dir_base, dir_width;
>>> +    uint64_t base;
>>> +    int level;
>>> +
>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>> +    shift = (shift + 1) * 3;
> 
> the assignment of variable shift and the corresponding comment is 
> incorrect here, and details are logged in the v1.03 change log of 
> LoongArch specification volume1 
> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
> 
> 
>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>      shift = shift + 3;
OK, I see.
It seems that this is right; thanks for the detailed explanation.
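
For illustration only, here is a tiny standalone sketch (not part of the 
patch) of what the corrected encoding means: with shift = PTEWIDTH + 3, a 
pte entry occupies 8 << PTEWIDTH bytes, matching the 64/128/256/512-bit 
comment above.

    #include <stdio.h>

    int main(void)
    {
        for (int ptewidth = 0; ptewidth < 4; ptewidth++) {
            int shift = ptewidth + 3;      /* byte shift used for indexing */
            /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
            printf("PTEWIDTH=%d -> shift=%d -> %d-bit pte entry\n",
                   ptewidth, shift, (1 << shift) * 8);
        }
        return 0;
    }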

> 
>>> +
>>> +    if ((address >> 63) & 0x1) {
>>> +        base = env->CSR_PGDH;
>>> +    } else {
>>> +        base = env->CSR_PGDL;
>>> +    }
>>> +    base &= TARGET_PHYS_MASK;
>>> +
>>> +    for (level = 4; level > 0; level--) {
>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>> +
>>> +        if (dir_width != 0) {
>> how about check whether it equeal to 0 firstly like this?
>>            if (dir_width == 0)
>>                continue;
>>
> It's good to reduce code nesting, I will adopt this suggestion.
>>> +            /* get next level page directory */
>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>> +            phys = base | index << shift;
>> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
>> 2:192bit, 3:256bit
>>
> After fixing the assignment of shift, this issue no longer exists. Shift 
> is less than or equal to 6, and index is 6 bit. Thus, index << shift is 
> at most 12 bit.
Supposing one pte entry is 128 bits, the value of shift is 4. Then
    phys = base | index << shift;  will be the same as
    phys = base | index * 16;
However, since all accessed pte entries are then 16-byte aligned, a pte entry 
at "phys = base | index * 16 + 8" will never be accessed. Is that right?

I think it should be something like this.
       index = (address >> dir_base) & ((1 << (dir_width + shift)) - 1);
       phys = base | index << 3;

Regards
Bibo, Mao
> 
>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>> +                /* mask off page dir permission bits */
>>> +                base &= TARGET_PAGE_MASK;
>>> +            } else {
>>> +                /* base is a huge pte */
>>> +                break;
>>> +            }
>>> +
>>> +            if (base == 0) {
>> physical adddress 0 is valid and Valid bit will be checked in later. 
>> Can we remove this?
> the value of base equals to 0 means that the current page directory 
> entry does not point to next level page directory, so we return here
>>> +                return TLBRET_NOMATCH;
>>> +            }
>>
>>> +        }
>>> +    }
>>> +
>>> +    /* pte */
>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>> +        /* Huge Page. base is pte */
>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>> +        }
>>> +    } else {
>>> +        /* Normal Page. base points to pte */
>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>> +        phys = base | index << shift;
>> Ditto, shift may be wider than 64-bit.
>>
>> Regards
>> Bibo Mao
> 
> Ditto, shift is less than or equal to 6.
> 
> 
> Regards
> 
> Miao Hao
> 
>>> +        base = ldq_phys(cs->as, phys);
>>> +    }
>>> +
>>> +    /* TODO: check plv and other bits? */
>>> +
>>> +    /* base is pte, in normal pte format */
>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>> +        return TLBRET_NOMATCH;
>>> +    }
>>> +
>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>> +        *prot = PAGE_READ;
>>> +    } else {
>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>> +    }
>>> +
>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>> +    /* mask RPLV, NX, NR bits */
>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>> +    /* mask other attribute bits */
>>> +    *physical = base & TARGET_PAGE_MASK;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>> *physical,
>>>                                    int *prot, target_ulong address,
>>> -                                 MMUAccessType access_type, int 
>>> mmu_idx)
>>> +                                 MMUAccessType access_type, int 
>>> mmu_idx,
>>> +                                 int is_debug)
>>>   {
>>>       int index, match;
>>>   @@ -151,6 +237,13 @@ static int 
>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>       if (match) {
>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>                                          address, access_type, index, 
>>> mmu_idx);
>>> +    } else if (is_debug) {
>>> +        /*
>>> +         * For debugger memory access, we want to do the map when 
>>> there is a
>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>> return 0 if
>>> +         * there is a valid map, else none zero.
>>> +         */
>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>> address);
>>>       }
>>>         return TLBRET_NOMATCH;
>>> @@ -158,7 +251,8 @@ static int 
>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>   #else
>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>> *physical,
>>>                                    int *prot, target_ulong address,
>>> -                                 MMUAccessType access_type, int 
>>> mmu_idx)
>>> +                                 MMUAccessType access_type, int 
>>> mmu_idx,
>>> +                                 int is_debug)
>>>   {
>>>       return TLBRET_NOMATCH;
>>>   }
>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>>> target_ulong va,
>>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>>                            int *prot, target_ulong address,
>>> -                         MMUAccessType access_type, int mmu_idx)
>>> +                         MMUAccessType access_type, int mmu_idx, int 
>>> is_debug)
>>>   {
>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, 
>>> hwaddr *physical,
>>>         /* Mapped address */
>>>       return loongarch_map_address(env, physical, prot, address,
>>> -                                 access_type, mmu_idx);
>>> +                                 access_type, mmu_idx, is_debug);
>>>   }
>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>> @@ -232,7 +326,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState 
>>> *cs, vaddr addr)
>>>       int prot;
>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>> MMU_DATA_LOAD,
>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>           return -1;
>>>       }
>>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months ago

On 2024/12/31 上午9:09, bibo mao wrote:
> 
> 
> On 2024/12/30 下午3:04, Miao Hao wrote:
>> Hi Bibo,
>>
>> Thanks for your review. I apologize for my late respond due to some 
>> personal reasons.
>>
>> On 2024/12/19 17:57, bibo mao wrote:
>>> Hi Miao,
>>>
>>> Thanks for doing this. It is useful to debug VM.
>>>
>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>> ---
>>>>   target/loongarch/cpu_helper.c     | 104 
>>>> ++++++++++++++++++++++++++++--
>>>>   target/loongarch/internals.h      |   4 +-
>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>> b/target/loongarch/cpu_helper.c
>>>> index 580362ac3e..c0828a813d 100644
>>>> --- a/target/loongarch/cpu_helper.c
>>>> +++ b/target/loongarch/cpu_helper.c
>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>> *env, target_ulong vaddr,
>>>>       return false;
>>>>   }
>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>> hwaddr *physical,
>>>> +                                 int *prot, target_ulong address)
>>>> +{
>>>> +    CPUState *cs = env_cpu(env);
>>>> +    target_ulong index, phys;
>>>> +    int shift;
>>>> +    uint64_t dir_base, dir_width;
>>>> +    uint64_t base;
>>>> +    int level;
>>>> +
>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>> +    shift = (shift + 1) * 3;
>>
>> the assignment of variable shift and the corresponding comment is 
>> incorrect here, and details are logged in the v1.03 change log of 
>> LoongArch specification volume1 
>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>
>>
>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>      shift = shift + 3;
> Ok, I see.
> It seems that this is right, thanks for the detailed explanation.
> 
>>
>>>> +
>>>> +    if ((address >> 63) & 0x1) {
>>>> +        base = env->CSR_PGDH;
>>>> +    } else {
>>>> +        base = env->CSR_PGDL;
>>>> +    }
>>>> +    base &= TARGET_PHYS_MASK;
>>>> +
>>>> +    for (level = 4; level > 0; level--) {
>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>> +
>>>> +        if (dir_width != 0) {
>>> how about check whether it equeal to 0 firstly like this?
>>>            if (dir_width == 0)
>>>                continue;
>>>
>> It's good to reduce code nesting, I will adopt this suggestion.
>>>> +            /* get next level page directory */
>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>> +            phys = base | index << shift;
>>> Here will only load first 64bit if shift is not 0, such as 1:128bit, 
>>> 2:192bit, 3:256bit
>>>
>> After fixing the assignment of shift, this issue no longer exists. 
>> Shift is less than or equal to 6, and index is 6 bit. Thus, index << 
>> shift is at most 12 bit.
> Supposing one pte entry is 128bit, value of shift is 4.
>     phys = base | index << shift;  will be the same as
>     phys = base | index * 16;
> however all accessed pte entry is 16 bytes aligned, pte entry with "phys 
> = base | index * 16 + 8" will never be accessed. Is that right?
> 
> I think it should be something like this.
>        index = (address >> dir_base) & ((1 << (dir_width + shift) - 1);
>        phys = base | index << 3;
Sorry, it should be shift - 3.
  index = (address >> dir_base) & ((1 << (dir_width + shift - 3)) - 1);
  phys = base | index << 3;
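
Purely to illustrate the difference, a standalone sketch (my own, with 
made-up address/base values, assuming a hypothetical 128-bit pte so shift = 4 
and dir_width = 11) that evaluates both formulas side by side; which one 
matches the architecture is exactly the question here.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t address   = 0x0000000120006000ULL; /* hypothetical VA */
        uint64_t base      = 0x0000000090000000ULL; /* hypothetical table base */
        uint64_t dir_base  = 14;                    /* 16KB pages */
        uint64_t dir_width = 11;
        int shift          = 4;                     /* hypothetical 128-bit pte */

        /* patch as posted: index in whole entries, 16-byte stride */
        uint64_t idx1  = (address >> dir_base) & ((1ULL << dir_width) - 1);
        uint64_t phys1 = base | (idx1 << shift);

        /* suggestion above: index in 64-bit words, 8-byte stride */
        uint64_t idx2  = (address >> dir_base) &
                         ((1ULL << (dir_width + shift - 3)) - 1);
        uint64_t phys2 = base | (idx2 << 3);

        printf("entry-indexed: index=%" PRIu64 " phys=%#" PRIx64 "\n",
               idx1, phys1);
        printf("word-indexed : index=%" PRIu64 " phys=%#" PRIx64 "\n",
               idx2, phys2);
        return 0;
    }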

> 
> Regards
> Bibo, Mao
>>
>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>> +                /* mask off page dir permission bits */
>>>> +                base &= TARGET_PAGE_MASK;
>>>> +            } else {
>>>> +                /* base is a huge pte */
>>>> +                break;
>>>> +            }
>>>> +
>>>> +            if (base == 0) {
>>> physical adddress 0 is valid and Valid bit will be checked in later. 
>>> Can we remove this?
>> the value of base equals to 0 means that the current page directory 
>> entry does not point to next level page directory, so we return here
>>>> +                return TLBRET_NOMATCH;
>>>> +            }
>>>
>>>> +        }
>>>> +    }
>>>> +
>>>> +    /* pte */
>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>> +        /* Huge Page. base is pte */
>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>> +        }
>>>> +    } else {
>>>> +        /* Normal Page. base points to pte */
>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>> +        phys = base | index << shift;
>>> Ditto, shift may be wider than 64-bit.
>>>
>>> Regards
>>> Bibo Mao
>>
>> Ditto, shift is less than or equal to 6.
>>
>>
>> Regards
>>
>> Miao Hao
>>
>>>> +        base = ldq_phys(cs->as, phys);
>>>> +    }
>>>> +
>>>> +    /* TODO: check plv and other bits? */
>>>> +
>>>> +    /* base is pte, in normal pte format */
>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>> +        return TLBRET_NOMATCH;
>>>> +    }
>>>> +
>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>> +        *prot = PAGE_READ;
>>>> +    } else {
>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>> +    }
>>>> +
>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>> +    /* mask RPLV, NX, NR bits */
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>> +    /* mask other attribute bits */
>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>> +
>>>> +    return 0;
>>>> +}
>>>> +
>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>> *physical,
>>>>                                    int *prot, target_ulong address,
>>>> -                                 MMUAccessType access_type, int 
>>>> mmu_idx)
>>>> +                                 MMUAccessType access_type, int 
>>>> mmu_idx,
>>>> +                                 int is_debug)
>>>>   {
>>>>       int index, match;
>>>>   @@ -151,6 +237,13 @@ static int 
>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>       if (match) {
>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>                                          address, access_type, 
>>>> index, mmu_idx);
>>>> +    } else if (is_debug) {
>>>> +        /*
>>>> +         * For debugger memory access, we want to do the map when 
>>>> there is a
>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>> return 0 if
>>>> +         * there is a valid map, else none zero.
>>>> +         */
>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>> address);
>>>>       }
>>>>         return TLBRET_NOMATCH;
>>>> @@ -158,7 +251,8 @@ static int 
>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>   #else
>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>> *physical,
>>>>                                    int *prot, target_ulong address,
>>>> -                                 MMUAccessType access_type, int 
>>>> mmu_idx)
>>>> +                                 MMUAccessType access_type, int 
>>>> mmu_idx,
>>>> +                                 int is_debug)
>>>>   {
>>>>       return TLBRET_NOMATCH;
>>>>   }
>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, 
>>>> target_ulong va,
>>>>     int get_physical_address(CPULoongArchState *env, hwaddr *physical,
>>>>                            int *prot, target_ulong address,
>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>> int is_debug)
>>>>   {
>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState *env, 
>>>> hwaddr *physical,
>>>>         /* Mapped address */
>>>>       return loongarch_map_address(env, physical, prot, address,
>>>> -                                 access_type, mmu_idx);
>>>> +                                 access_type, mmu_idx, is_debug);
>>>>   }
>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>       int prot;
>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>> MMU_DATA_LOAD,
>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>           return -1;
>>>>       }
>>>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 3 months ago
On 2024/12/31 09:18, bibo mao wrote:
>
>
> On 2024/12/31 上午9:09, bibo mao wrote:
>>
>>
>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>> Hi Bibo,
>>>
>>> Thanks for your review. I apologize for my late respond due to some 
>>> personal reasons.
>>>
>>> On 2024/12/19 17:57, bibo mao wrote:
>>>> Hi Miao,
>>>>
>>>> Thanks for doing this. It is useful to debug VM.
>>>>
>>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>> ---
>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>> ++++++++++++++++++++++++++++--
>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>
>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>> b/target/loongarch/cpu_helper.c
>>>>> index 580362ac3e..c0828a813d 100644
>>>>> --- a/target/loongarch/cpu_helper.c
>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>> *env, target_ulong vaddr,
>>>>>       return false;
>>>>>   }
>>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>>> hwaddr *physical,
>>>>> +                                 int *prot, target_ulong address)
>>>>> +{
>>>>> +    CPUState *cs = env_cpu(env);
>>>>> +    target_ulong index, phys;
>>>>> +    int shift;
>>>>> +    uint64_t dir_base, dir_width;
>>>>> +    uint64_t base;
>>>>> +    int level;
>>>>> +
>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>> +    shift = (shift + 1) * 3;
>>>
>>> the assignment of variable shift and the corresponding comment is 
>>> incorrect here, and details are logged in the v1.03 change log of 
>>> LoongArch specification volume1 
>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>
>>>
>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>      shift = shift + 3;
>> Ok, I see.
>> It seems that this is right, thanks for the detailed explanation.
>>
>>>
>>>>> +
>>>>> +    if ((address >> 63) & 0x1) {
>>>>> +        base = env->CSR_PGDH;
>>>>> +    } else {
>>>>> +        base = env->CSR_PGDL;
>>>>> +    }
>>>>> +    base &= TARGET_PHYS_MASK;
>>>>> +
>>>>> +    for (level = 4; level > 0; level--) {
>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>> +
>>>>> +        if (dir_width != 0) {
>>>> how about check whether it equeal to 0 firstly like this?
>>>>            if (dir_width == 0)
>>>>                continue;
>>>>
>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>> +            /* get next level page directory */
>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>> +            phys = base | index << shift;
>>>> Here will only load first 64bit if shift is not 0, such as 
>>>> 1:128bit, 2:192bit, 3:256bit
>>>>
>>> After fixing the assignment of shift, this issue no longer exists. 
>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index << 
>>> shift is at most 12 bit.
>> Supposing one pte entry is 128bit, value of shift is 4.
>>     phys = base | index << shift;  will be the same as
>>     phys = base | index * 16;
>> however all accessed pte entry is 16 bytes aligned, pte entry with 
>> "phys = base | index * 16 + 8" will never be accessed. Is that right?
>>
I see what you mean, but since all accessed pte entries are 16-byte aligned, 
why would there be a pte entry at "phys = base | index * 16 + 8"? 
On the other hand, a 128-bit pte entry remains undefined in LoongArch 
specification volume 1, which only defines the 64-bit pte entry (in section 
5.4.5). Perhaps CSR_PWCL.PTEWIDTH is reserved for future extension, so I 
think we can just leave it alone since the code works well with 64-bit ptes.
>> I think it should be something like this.
>>        index = (address >> dir_base) & ((1 << (dir_width + shift) - 1);
>>        phys = base | index << 3;
> Sorry, it should be shift - 3.
>  index = (address >> dir_base) & (1 << (dir_width + shift - 3) - 1);
>  phys = base | index << 3;
>
In my understanding, the assignment "index = (address >> dir_base) & ((1 << 
dir_width) - 1)" retrieves the virtual-address bits selected by CSR_PWC[L|H] 
as the index (Figure 5-2 in LoongArch specification volume 1), and 
CSR_PWCL.PTEWIDTH pads additional zero bits below the retrieved index for 
alignment.

Here's an example: a 3-level page table with 16KB pages, as set up by 
LoongArch Linux:

virtual address:

+-------------+-------------+-------------+----------------+
|46         36|35         25|24         14|13             0|
+-------------+-------------+-------------+----------------+
   dir3_index    dir1_index     pt_index      page_offset

CSR_PWCL:  |00|00000|00000|11001|01011|01110|01011|

CSR_PWCH:  |0000000|0|000000|000000|100100|001011|

Note that dir3_index, dir1_index and pt_index are each 11 bits wide and 
CSR_PWCL.PTEWidth=0. To index a 64-bit pte in the 16KB (2^14 B) page 
table, we should pad (0 + 3) zero bits below pt_index.
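
To make the example concrete, a minimal standalone sketch (my own, using 
made-up address and base values) of how pt_index is extracted and scaled in 
this 3-level/16KB layout with PTEWidth = 0:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t address   = 0x0000000120004000ULL; /* hypothetical VA */
        uint64_t base      = 0x0000000090000000ULL; /* hypothetical last-level table */
        uint64_t dir_base  = 14;    /* pt_index starts at bit 14 (16KB pages) */
        uint64_t dir_width = 11;    /* pt_index is 11 bits wide */
        int shift          = 0 + 3; /* PTEWidth = 0 -> 64-bit pte, pad 3 zero bits */

        uint64_t index = (address >> dir_base) & ((1ULL << dir_width) - 1);
        uint64_t phys  = base | (index << shift); /* byte address of the pte */

        printf("pt_index = %" PRIu64 ", pte at %#" PRIx64 "\n", index, phys);
        return 0;
    }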


Regards

Miao, Hao

>>
>> Regards
>> Bibo, Mao
>>>
>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>> +                /* mask off page dir permission bits */
>>>>> +                base &= TARGET_PAGE_MASK;
>>>>> +            } else {
>>>>> +                /* base is a huge pte */
>>>>> +                break;
>>>>> +            }
>>>>> +
>>>>> +            if (base == 0) {
>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>> later. Can we remove this?
>>> the value of base equals to 0 means that the current page directory 
>>> entry does not point to next level page directory, so we return here
>>>>> +                return TLBRET_NOMATCH;
>>>>> +            }
>>>>
>>>>> +        }
>>>>> +    }
>>>>> +
>>>>> +    /* pte */
>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>> +        /* Huge Page. base is pte */
>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>> +        }
>>>>> +    } else {
>>>>> +        /* Normal Page. base points to pte */
>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>> +        phys = base | index << shift;
>>>> Ditto, shift may be wider than 64-bit.
>>>>
>>>> Regards
>>>> Bibo Mao
>>>
>>> Ditto, shift is less than or equal to 6.
>>>
>>>
>>> Regards
>>>
>>> Miao Hao
>>>
>>>>> +        base = ldq_phys(cs->as, phys);
>>>>> +    }
>>>>> +
>>>>> +    /* TODO: check plv and other bits? */
>>>>> +
>>>>> +    /* base is pte, in normal pte format */
>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>> +        return TLBRET_NOMATCH;
>>>>> +    }
>>>>> +
>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>> +        *prot = PAGE_READ;
>>>>> +    } else {
>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>> +    }
>>>>> +
>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>> +    /* mask RPLV, NX, NR bits */
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>> +    /* mask other attribute bits */
>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>> +
>>>>> +    return 0;
>>>>> +}
>>>>> +
>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>> *physical,
>>>>>                                    int *prot, target_ulong address,
>>>>> -                                 MMUAccessType access_type, int 
>>>>> mmu_idx)
>>>>> +                                 MMUAccessType access_type, int 
>>>>> mmu_idx,
>>>>> +                                 int is_debug)
>>>>>   {
>>>>>       int index, match;
>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>       if (match) {
>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>                                          address, access_type, 
>>>>> index, mmu_idx);
>>>>> +    } else if (is_debug) {
>>>>> +        /*
>>>>> +         * For debugger memory access, we want to do the map when 
>>>>> there is a
>>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>>> return 0 if
>>>>> +         * there is a valid map, else none zero.
>>>>> +         */
>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>> address);
>>>>>       }
>>>>>         return TLBRET_NOMATCH;
>>>>> @@ -158,7 +251,8 @@ static int 
>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>   #else
>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>> *physical,
>>>>>                                    int *prot, target_ulong address,
>>>>> -                                 MMUAccessType access_type, int 
>>>>> mmu_idx)
>>>>> +                                 MMUAccessType access_type, int 
>>>>> mmu_idx,
>>>>> +                                 int is_debug)
>>>>>   {
>>>>>       return TLBRET_NOMATCH;
>>>>>   }
>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>> *env, target_ulong va,
>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>> *physical,
>>>>>                            int *prot, target_ulong address,
>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>>> int is_debug)
>>>>>   {
>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>> *env, hwaddr *physical,
>>>>>         /* Mapped address */
>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>> -                                 access_type, mmu_idx);
>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>   }
>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>> addr)
>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>       int prot;
>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>> MMU_DATA_LOAD,
>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>           return -1;
>>>>>       }
>>>>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months ago

On 2024/12/31 上午10:08, Miao Hao wrote:
> 
> On 2024/12/31 09:18, bibo mao wrote:
>>
>>
>> On 2024/12/31 上午9:09, bibo mao wrote:
>>>
>>>
>>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>>> Hi Bibo,
>>>>
>>>> Thanks for your review. I apologize for my late respond due to some 
>>>> personal reasons.
>>>>
>>>> On 2024/12/19 17:57, bibo mao wrote:
>>>>> Hi Miao,
>>>>>
>>>>> Thanks for doing this. It is useful to debug VM.
>>>>>
>>>>> On 2024/12/19 上午11:24, Miao Hao wrote:
>>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>>> ---
>>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>>> ++++++++++++++++++++++++++++--
>>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>>
>>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>>> b/target/loongarch/cpu_helper.c
>>>>>> index 580362ac3e..c0828a813d 100644
>>>>>> --- a/target/loongarch/cpu_helper.c
>>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>>> *env, target_ulong vaddr,
>>>>>>       return false;
>>>>>>   }
>>>>>>   +static int loongarch_page_table_walker(CPULoongArchState *env, 
>>>>>> hwaddr *physical,
>>>>>> +                                 int *prot, target_ulong address)
>>>>>> +{
>>>>>> +    CPUState *cs = env_cpu(env);
>>>>>> +    target_ulong index, phys;
>>>>>> +    int shift;
>>>>>> +    uint64_t dir_base, dir_width;
>>>>>> +    uint64_t base;
>>>>>> +    int level;
>>>>>> +
>>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>> +    shift = (shift + 1) * 3;
>>>>
>>>> the assignment of variable shift and the corresponding comment is 
>>>> incorrect here, and details are logged in the v1.03 change log of 
>>>> LoongArch specification volume1 
>>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>>
>>>>
>>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>      shift = shift + 3;
>>> Ok, I see.
>>> It seems that this is right, thanks for the detailed explanation.
>>>
>>>>
>>>>>> +
>>>>>> +    if ((address >> 63) & 0x1) {
>>>>>> +        base = env->CSR_PGDH;
>>>>>> +    } else {
>>>>>> +        base = env->CSR_PGDL;
>>>>>> +    }
>>>>>> +    base &= TARGET_PHYS_MASK;
>>>>>> +
>>>>>> +    for (level = 4; level > 0; level--) {
>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>>> +
>>>>>> +        if (dir_width != 0) {
>>>>> how about check whether it equeal to 0 firstly like this?
>>>>>            if (dir_width == 0)
>>>>>                continue;
>>>>>
>>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>>> +            /* get next level page directory */
>>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>> +            phys = base | index << shift;
>>>>> Here will only load first 64bit if shift is not 0, such as 
>>>>> 1:128bit, 2:192bit, 3:256bit
>>>>>
>>>> After fixing the assignment of shift, this issue no longer exists. 
>>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index << 
>>>> shift is at most 12 bit.
>>> Supposing one pte entry is 128bit, value of shift is 4.
>>>     phys = base | index << shift;  will be the same as
>>>     phys = base | index * 16;
>>> however all accessed pte entry is 16 bytes aligned, pte entry with 
>>> "phys = base | index * 16 + 8" will never be accessed. Is that right?
>>>
> I see what you mean, but since all accessed pte entry is 16 bytes 
> aligned, why there is a pte entry with "phys = base | index * 16 + 8"? 
> On the other hand, a 128 bit pte entry remains undefined in LoongArch 
> specification volume1, which only defines 64 bit pte entry (in section 
> 5.4.5). Perhaps the CSR_PWCL.PTEWIDTH is for future extension, so I 
> think we can just leave it alone since it works well on 64 bit pte.
IIRC, a 128-bit pte entry describes two pages: the physical address of 
page0 is located at phys = base | index * 16, and page1 can be located at
   phys = base | index * 16 + 8

If we only want to care about 64-bit ptes, the following lines are 
suggested to be removed:
    +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
    +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
    +    shift = (shift + 1) * 3;
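
If the walker only cares about 64-bit ptes, something like this minimal 
helper (my own sketch, not a proposal for the actual patch) would then be 
enough, with a fixed 8-byte stride:

    #include <stdint.h>

    /* Sketch only: index a table of 64-bit entries, 8-byte stride. */
    static inline uint64_t pte_entry_addr(uint64_t base, uint64_t address,
                                          uint64_t dir_base, uint64_t dir_width)
    {
        uint64_t index = (address >> dir_base) & ((1ULL << dir_width) - 1);

        return base | (index << 3);
    }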

Regards
Bibo Mao
>>> I think it should be something like this.
>>>        index = (address >> dir_base) & ((1 << (dir_width + shift) - 1);
>>>        phys = base | index << 3;
>> Sorry, it should be shift - 3.
>>  index = (address >> dir_base) & (1 << (dir_width + shift - 3) - 1);
>>  phys = base | index << 3;
>>
> In my understanding, assignment "index = (address >> dir_base) & ((1 << 
> dir_width) - 1)" retrieves the corresponding bits in the virtual address 
> set in CSR_PWC[L|H] as the index (Figure 5-2 in LoongArch specification 
> volume1), and CSR_PWCL.PTEWIDTH pads zeros to retrieved bits to align.
> 
> Here's an example, a 3 level page table with 16KB pages in Loongarch 
> linux setup:
> 
> virtual address:
> 
> +---------------+---------------+---------------+---------------+
> 
> |46               36|35               25|24 14|13                 0|
> 
> +---------------+---------------+---------------+---------------+
> 
>     dir3_index    dir1_index    pt_index      page_offset
> 
> CSR_PWCL:
> 
> |00|00000|00000|11001|01011|01110|01011|
> 
> CSR_PWCH:
> 
> |0000000|0|000000|000000|100100|001011|
> 
> Note that dir3_index, dir1_index and pt_index are 11 bit and 
> CSR_PWCL.PTEWidth=0. To index a 64 bit pte in the 16KB(2^14B) page 
> table, we should pad (0 + 3) zeros to pt_index.
> 
> 
> Regards
> 
> Miao, Hao
> 
>>>
>>> Regards
>>> Bibo, Mao
>>>>
>>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>> +                /* mask off page dir permission bits */
>>>>>> +                base &= TARGET_PAGE_MASK;
>>>>>> +            } else {
>>>>>> +                /* base is a huge pte */
>>>>>> +                break;
>>>>>> +            }
>>>>>> +
>>>>>> +            if (base == 0) {
>>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>>> later. Can we remove this?
>>>> the value of base equals to 0 means that the current page directory 
>>>> entry does not point to next level page directory, so we return here
>>>>>> +                return TLBRET_NOMATCH;
>>>>>> +            }
>>>>>
>>>>>> +        }
>>>>>> +    }
>>>>>> +
>>>>>> +    /* pte */
>>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>> +        /* Huge Page. base is pte */
>>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>>> +        }
>>>>>> +    } else {
>>>>>> +        /* Normal Page. base points to pte */
>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>> +        phys = base | index << shift;
>>>>> Ditto, shift may be wider than 64-bit.
>>>>>
>>>>> Regards
>>>>> Bibo Mao
>>>>
>>>> Ditto, shift is less than or equal to 6.
>>>>
>>>>
>>>> Regards
>>>>
>>>> Miao Hao
>>>>
>>>>>> +        base = ldq_phys(cs->as, phys);
>>>>>> +    }
>>>>>> +
>>>>>> +    /* TODO: check plv and other bits? */
>>>>>> +
>>>>>> +    /* base is pte, in normal pte format */
>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>>> +        return TLBRET_NOMATCH;
>>>>>> +    }
>>>>>> +
>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>>> +        *prot = PAGE_READ;
>>>>>> +    } else {
>>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>>> +    }
>>>>>> +
>>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>>> +    /* mask RPLV, NX, NR bits */
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>>> +    /* mask other attribute bits */
>>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>>> +
>>>>>> +    return 0;
>>>>>> +}
>>>>>> +
>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                                    int *prot, target_ulong address,
>>>>>> -                                 MMUAccessType access_type, int 
>>>>>> mmu_idx)
>>>>>> +                                 MMUAccessType access_type, int 
>>>>>> mmu_idx,
>>>>>> +                                 int is_debug)
>>>>>>   {
>>>>>>       int index, match;
>>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>       if (match) {
>>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>>                                          address, access_type, 
>>>>>> index, mmu_idx);
>>>>>> +    } else if (is_debug) {
>>>>>> +        /*
>>>>>> +         * For debugger memory access, we want to do the map when 
>>>>>> there is a
>>>>>> +         * legal mapping, even if the mapping is not yet in TLB. 
>>>>>> return 0 if
>>>>>> +         * there is a valid map, else none zero.
>>>>>> +         */
>>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>>> address);
>>>>>>       }
>>>>>>         return TLBRET_NOMATCH;
>>>>>> @@ -158,7 +251,8 @@ static int 
>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>   #else
>>>>>>   static int loongarch_map_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                                    int *prot, target_ulong address,
>>>>>> -                                 MMUAccessType access_type, int 
>>>>>> mmu_idx)
>>>>>> +                                 MMUAccessType access_type, int 
>>>>>> mmu_idx,
>>>>>> +                                 int is_debug)
>>>>>>   {
>>>>>>       return TLBRET_NOMATCH;
>>>>>>   }
>>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>>> *env, target_ulong va,
>>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>>> *physical,
>>>>>>                            int *prot, target_ulong address,
>>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>>> +                         MMUAccessType access_type, int mmu_idx, 
>>>>>> int is_debug)
>>>>>>   {
>>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>>> *env, hwaddr *physical,
>>>>>>         /* Mapped address */
>>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>>> -                                 access_type, mmu_idx);
>>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>>   }
>>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>>> addr)
>>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>>       int prot;
>>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>>> MMU_DATA_LOAD,
>>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>>           return -1;
>>>>>>       }
>>>>>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by Miao Hao 3 months ago
On 2024/12/31 10:23, bibo mao wrote:
>
>
> On 2024/12/31 上午10:08, Miao Hao wrote:
>>
>> On 2024/12/31 09:18, bibo mao wrote:
>>>
>>>
>>> On 2024/12/31 上午9:09, bibo mao wrote:
>>>>
>>>>
>>>> On 2024/12/30 下午3:04, Miao Hao wrote:
>>>>> Hi Bibo,
>>>>>
>>>>> Thanks for your review. I apologize for my late respond due to 
>>>>> some personal reasons.
>>>>>
>>>>> On 2024/12/19 17:57, bibo mao wrote:
>>>>>> Hi Miao,
>>>>>>
>>>>>> Thanks for doing this. It is useful to debug VM.
>>>>>>
>>>>>> On 2024/12/19 11:24 AM, Miao Hao wrote:
>>>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>>>> ---
>>>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>>>> ++++++++++++++++++++++++++++--
>>>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>>>
>>>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>>>> b/target/loongarch/cpu_helper.c
>>>>>>> index 580362ac3e..c0828a813d 100644
>>>>>>> --- a/target/loongarch/cpu_helper.c
>>>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>>>> *env, target_ulong vaddr,
>>>>>>>       return false;
>>>>>>>   }
>>>>>>>   +static int loongarch_page_table_walker(CPULoongArchState 
>>>>>>> *env, hwaddr *physical,
>>>>>>> +                                 int *prot, target_ulong address)
>>>>>>> +{
>>>>>>> +    CPUState *cs = env_cpu(env);
>>>>>>> +    target_ulong index, phys;
>>>>>>> +    int shift;
>>>>>>> +    uint64_t dir_base, dir_width;
>>>>>>> +    uint64_t base;
>>>>>>> +    int level;
>>>>>>> +
>>>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>>> +    shift = (shift + 1) * 3;
>>>>>
>>>>> the assignment of variable shift and the corresponding comment is 
>>>>> incorrect here, and details are logged in the v1.03 change log of 
>>>>> LoongArch specification volume1 
>>>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>>>
>>>>>
>>>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>      shift = shift + 3;
>>>> Ok, I see.
>>>> It seems that this is right, thanks for the detailed explanation.
>>>>
>>>>>
>>>>>>> +
>>>>>>> +    if ((address >> 63) & 0x1) {
>>>>>>> +        base = env->CSR_PGDH;
>>>>>>> +    } else {
>>>>>>> +        base = env->CSR_PGDL;
>>>>>>> +    }
>>>>>>> +    base &= TARGET_PHYS_MASK;
>>>>>>> +
>>>>>>> +    for (level = 4; level > 0; level--) {
>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>>>> +
>>>>>>> +        if (dir_width != 0) {
>>>>>> how about check whether it equeal to 0 firstly like this?
>>>>>>            if (dir_width == 0)
>>>>>>                continue;
>>>>>>
>>>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>>>> +            /* get next level page directory */
>>>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 
>>>>>>> 1);
>>>>>>> +            phys = base | index << shift;
>>>>>> Here will only load first 64bit if shift is not 0, such as 
>>>>>> 1:128bit, 2:192bit, 3:256bit
>>>>>>
>>>>> After fixing the assignment of shift, this issue no longer exists. 
>>>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index 
>>>>> << shift is at most 12 bit.
>>>> Supposing one pte entry is 128bit, value of shift is 4.
>>>>     phys = base | index << shift;  will be the same as
>>>>     phys = base | index * 16;
>>>> however all accessed pte entry is 16 bytes aligned, pte entry with 
>>>> "phys = base | index * 16 + 8" will never be accessed. Is that right?
>>>>
>> I see what you mean, but since all accessed pte entry is 16 bytes 
>> aligned, why there is a pte entry with "phys = base | index * 16 + 
>> 8"? On the other hand, a 128 bit pte entry remains undefined in 
>> LoongArch specification volume1, which only defines 64 bit pte entry 
>> (in section 5.4.5). Perhaps the CSR_PWCL.PTEWIDTH is for future 
>> extension, so I think we can just leave it alone since it works well 
>> on 64 bit pte.
> IIRC, for 128 bit pte entry, it presents two pages. physical address 
> of page0 is located phys = base | index * 16, page1 can be located at
>   phys = base | index * 16 + 8
If a single pte presented two pages, a virtual address would not be mapped 
to a unique physical address. The key question is the format of a 128-bit 
pte, which is undefined in the specification.
>
> If we want to only care about 64 bit pte, the following sentences are 
> suggested to removed.
>    +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>    +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>    +    shift = (shift + 1) * 3;
>
The reason for retaining this is to stay consistent with helper_lddir and 
helper_ldpte in target/loongarch/tcg/tlb_helper.c.
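
For reference, here is a minimal sketch of one directory-walk step with the 
corrected PTEWIDTH encoding discussed above (0:64bit, 1:128bit, 2:256bit, 
3:512bit, i.e. shift = PTEWIDTH + 3). The helper name walk_dir_level is 
purely illustrative and not part of the patch; the CSR fields and helper 
functions are the ones the patch already uses:

    /*
     * Illustrative sketch only: one directory-walk step, using
     * shift = PTEWIDTH + 3 so that entry addressing matches
     * helper_lddir/helper_ldpte. The caller still handles the HUGE
     * bit and permission masking as in the patch.
     */
    static uint64_t walk_dir_level(CPULoongArchState *env, uint64_t base,
                                   target_ulong address, int level)
    {
        CPUState *cs = env_cpu(env);
        uint64_t dir_base, dir_width, index, phys;
        int shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH) + 3;

        get_dir_base_width(env, &dir_base, &dir_width, level);
        if (dir_width == 0) {
            return base;                    /* this level is not used */
        }
        index = (address >> dir_base) & ((1ULL << dir_width) - 1);
        phys = base | (index << shift);     /* each entry is 2^shift bytes */
        return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    }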


Regards

Miao Hao

> Regards
> Bibo Mao
>>>> I think it should be something like this.
>>>>        index = (address >> dir_base) & ((1 << (dir_width + shift) - 
>>>> 1);
>>>>        phys = base | index << 3;
>>> Sorry, it should be shift - 3.
>>>  index = (address >> dir_base) & (1 << (dir_width + shift - 3) - 1);
>>>  phys = base | index << 3;
>>>
>> In my understanding, assignment "index = (address >> dir_base) & ((1 
>> << dir_width) - 1)" retrieves the corresponding bits in the virtual 
>> address set in CSR_PWC[L|H] as the index (Figure 5-2 in LoongArch 
>> specification volume1), and CSR_PWCL.PTEWIDTH pads zeros to retrieved 
>> bits to align.
>>
>> Here's an example, a 3 level page table with 16KB pages in Loongarch 
>> linux setup:
>>
>> virtual address:
>>
>> +---------------+---------------+---------------+---------------+
>>
>> |46               36|35               25|24 14|13                 0|
>>
>> +---------------+---------------+---------------+---------------+
>>
>>     dir3_index    dir1_index    pt_index      page_offset
>>
>> CSR_PWCL:
>>
>> |00|00000|00000|11001|01011|01110|01011|
>>
>> CSR_PWCH:
>>
>> |0000000|0|000000|000000|100100|001011|
>>
>> Note that dir3_index, dir1_index and pt_index are 11 bit and 
>> CSR_PWCL.PTEWidth=0. To index a 64 bit pte in the 16KB(2^14B) page 
>> table, we should pad (0 + 3) zeros to pt_index.
>>
>>
>> Regards
>>
>> Miao, Hao
>>
>>>>
>>>> Regards
>>>> Bibo, Mao
>>>>>
>>>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>> +                /* mask off page dir permission bits */
>>>>>>> +                base &= TARGET_PAGE_MASK;
>>>>>>> +            } else {
>>>>>>> +                /* base is a huge pte */
>>>>>>> +                break;
>>>>>>> +            }
>>>>>>> +
>>>>>>> +            if (base == 0) {
>>>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>>>> later. Can we remove this?
>>>>> the value of base equals to 0 means that the current page 
>>>>> directory entry does not point to next level page directory, so we 
>>>>> return here
>>>>>>> +                return TLBRET_NOMATCH;
>>>>>>> +            }
>>>>>>
>>>>>>> +        }
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* pte */
>>>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>> +        /* Huge Page. base is pte */
>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>>>> +        }
>>>>>>> +    } else {
>>>>>>> +        /* Normal Page. base points to pte */
>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>>> +        phys = base | index << shift;
>>>>>> Ditto, shift may be wider than 64-bit.
>>>>>>
>>>>>> Regards
>>>>>> Bibo Mao
>>>>>
>>>>> Ditto, shift is less than or equal to 6.
>>>>>
>>>>>
>>>>> Regards
>>>>>
>>>>> Miao Hao
>>>>>
>>>>>>> +        base = ldq_phys(cs->as, phys);
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* TODO: check plv and other bits? */
>>>>>>> +
>>>>>>> +    /* base is pte, in normal pte format */
>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>>>> +        return TLBRET_NOMATCH;
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>>>> +        *prot = PAGE_READ;
>>>>>>> +    } else {
>>>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>>>> +    /* mask RPLV, NX, NR bits */
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>>>> +    /* mask other attribute bits */
>>>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>>>> +
>>>>>>> +    return 0;
>>>>>>> +}
>>>>>>> +
>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, 
>>>>>>> hwaddr *physical,
>>>>>>>                                    int *prot, target_ulong address,
>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx)
>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx,
>>>>>>> +                                 int is_debug)
>>>>>>>   {
>>>>>>>       int index, match;
>>>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>       if (match) {
>>>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>>>                                          address, access_type, 
>>>>>>> index, mmu_idx);
>>>>>>> +    } else if (is_debug) {
>>>>>>> +        /*
>>>>>>> +         * For debugger memory access, we want to do the map 
>>>>>>> when there is a
>>>>>>> +         * legal mapping, even if the mapping is not yet in 
>>>>>>> TLB. return 0 if
>>>>>>> +         * there is a valid map, else none zero.
>>>>>>> +         */
>>>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>>>> address);
>>>>>>>       }
>>>>>>>         return TLBRET_NOMATCH;
>>>>>>> @@ -158,7 +251,8 @@ static int 
>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>   #else
>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, 
>>>>>>> hwaddr *physical,
>>>>>>>                                    int *prot, target_ulong address,
>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx)
>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>> mmu_idx,
>>>>>>> +                                 int is_debug)
>>>>>>>   {
>>>>>>>       return TLBRET_NOMATCH;
>>>>>>>   }
>>>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>>>> *env, target_ulong va,
>>>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>>>> *physical,
>>>>>>>                            int *prot, target_ulong address,
>>>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>>>> +                         MMUAccessType access_type, int 
>>>>>>> mmu_idx, int is_debug)
>>>>>>>   {
>>>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>>>> *env, hwaddr *physical,
>>>>>>>         /* Mapped address */
>>>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>>>> -                                 access_type, mmu_idx);
>>>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>>>   }
>>>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>>>> addr)
>>>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>>>       int prot;
>>>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>>>> MMU_DATA_LOAD,
>>>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>>>           return -1;
>>>>>>>       }
>>>>>>>       return phys_addr;


Re: [PATCH] feat: add loongarch page table walker support for debugger memory access
Posted by bibo mao 3 months ago

On 2024/12/31 10:51 AM, Miao Hao wrote:
> 
> On 2024/12/31 10:23, bibo mao wrote:
>>
>>
>> On 2024/12/31 10:08 AM, Miao Hao wrote:
>>>
>>> On 2024/12/31 09:18, bibo mao wrote:
>>>>
>>>>
>>>> On 2024/12/31 9:09 AM, bibo mao wrote:
>>>>>
>>>>>
>>>>> On 2024/12/30 3:04 PM, Miao Hao wrote:
>>>>>> Hi Bibo,
>>>>>>
>>>>>> Thanks for your review. I apologize for my late respond due to 
>>>>>> some personal reasons.
>>>>>>
>>>>>> On 2024/12/19 17:57, bibo mao wrote:
>>>>>>> Hi Miao,
>>>>>>>
>>>>>>> Thanks for doing this. It is useful to debug VM.
>>>>>>>
>>>>>>> On 2024/12/19 11:24 AM, Miao Hao wrote:
>>>>>>>> Signed-off-by: Miao Hao <haomiao23s@ict.ac.cn>
>>>>>>>> ---
>>>>>>>>   target/loongarch/cpu_helper.c     | 104 
>>>>>>>> ++++++++++++++++++++++++++++--
>>>>>>>>   target/loongarch/internals.h      |   4 +-
>>>>>>>>   target/loongarch/tcg/tlb_helper.c |   4 +-
>>>>>>>>   3 files changed, 104 insertions(+), 8 deletions(-)
>>>>>>>>
>>>>>>>> diff --git a/target/loongarch/cpu_helper.c 
>>>>>>>> b/target/loongarch/cpu_helper.c
>>>>>>>> index 580362ac3e..c0828a813d 100644
>>>>>>>> --- a/target/loongarch/cpu_helper.c
>>>>>>>> +++ b/target/loongarch/cpu_helper.c
>>>>>>>> @@ -141,9 +141,95 @@ bool loongarch_tlb_search(CPULoongArchState 
>>>>>>>> *env, target_ulong vaddr,
>>>>>>>>       return false;
>>>>>>>>   }
>>>>>>>>   +static int loongarch_page_table_walker(CPULoongArchState 
>>>>>>>> *env, hwaddr *physical,
>>>>>>>> +                                 int *prot, target_ulong address)
>>>>>>>> +{
>>>>>>>> +    CPUState *cs = env_cpu(env);
>>>>>>>> +    target_ulong index, phys;
>>>>>>>> +    int shift;
>>>>>>>> +    uint64_t dir_base, dir_width;
>>>>>>>> +    uint64_t base;
>>>>>>>> +    int level;
>>>>>>>> +
>>>>>>>> +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>>>>>>> +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>>>> +    shift = (shift + 1) * 3;
>>>>>>
>>>>>> the assignment of variable shift and the corresponding comment is 
>>>>>> incorrect here, and details are logged in the v1.03 change log of 
>>>>>> LoongArch specification volume1 
>>>>>> (https://loongson.cn/uploads/images/2023102309132647981.%E9%BE%99%E8%8A%AF%E6%9E%B6%E6%9E%84%E5%8F%82%E8%80%83%E6%89%8B%E5%86%8C%E5%8D%B7%E4%B8%80_r1p10.pdf) 
>>>>>>
>>>>>>
>>>>>>      /* 0:64bit, 1:128bit, 2:256bit, 3:512bit */
>>>>>>      shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>>>>>      shift = shift + 3;
>>>>> Ok, I see.
>>>>> It seems that this is right, thanks for the detailed explanation.
>>>>>
>>>>>>
>>>>>>>> +
>>>>>>>> +    if ((address >> 63) & 0x1) {
>>>>>>>> +        base = env->CSR_PGDH;
>>>>>>>> +    } else {
>>>>>>>> +        base = env->CSR_PGDL;
>>>>>>>> +    }
>>>>>>>> +    base &= TARGET_PHYS_MASK;
>>>>>>>> +
>>>>>>>> +    for (level = 4; level > 0; level--) {
>>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, level);
>>>>>>>> +
>>>>>>>> +        if (dir_width != 0) {
>>>>>>> how about check whether it equeal to 0 firstly like this?
>>>>>>>            if (dir_width == 0)
>>>>>>>                continue;
>>>>>>>
>>>>>> It's good to reduce code nesting, I will adopt this suggestion.
>>>>>>>> +            /* get next level page directory */
>>>>>>>> +            index = (address >> dir_base) & ((1 << dir_width) - 
>>>>>>>> 1);
>>>>>>>> +            phys = base | index << shift;
>>>>>>> Here will only load first 64bit if shift is not 0, such as 
>>>>>>> 1:128bit, 2:192bit, 3:256bit
>>>>>>>
>>>>>> After fixing the assignment of shift, this issue no longer exists. 
>>>>>> Shift is less than or equal to 6, and index is 6 bit. Thus, index 
>>>>>> << shift is at most 12 bit.
>>>>> Supposing one pte entry is 128bit, value of shift is 4.
>>>>>     phys = base | index << shift;  will be the same as
>>>>>     phys = base | index * 16;
>>>>> however all accessed pte entry is 16 bytes aligned, pte entry with 
>>>>> "phys = base | index * 16 + 8" will never be accessed. Is that right?
>>>>>
>>> I see what you mean, but since all accessed pte entry is 16 bytes 
>>> aligned, why there is a pte entry with "phys = base | index * 16 + 
>>> 8"? On the other hand, a 128 bit pte entry remains undefined in 
>>> LoongArch specification volume1, which only defines 64 bit pte entry 
>>> (in section 5.4.5). Perhaps the CSR_PWCL.PTEWIDTH is for future 
>>> extension, so I think we can just leave it alone since it works well 
>>> on 64 bit pte.
>> IIRC, for 128 bit pte entry, it presents two pages. physical address 
>> of page0 is located phys = base | index * 16, page1 can be located at
>>   phys = base | index * 16 + 8
> If a single pte presented two pages, a virtual address would not be mapped 
> to a unique physical address. The key question is the format of a 128-bit 
> pte, which is undefined in the specification.
Hmm, I got this mixed up.

For a 128-bit pte, one pte represents two consecutive pages; the pte itself 
can be located at address "phys = base | index * 16", while the address 
"phys = base | index * 16 + 8" is undefined.
>>
>> If we want to only care about 64 bit pte, the following sentences are 
>> suggested to removed.
>>    +    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
>>    +    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
>>    +    shift = (shift + 1) * 3;
>>
> The reason for retaining this is to stay consistent with helper_lddir and 
> helper_ldpte in target/loongarch/tcg/tlb_helper.c.
Wait a moment, I will check whether we can remove it totally.
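
If the PTEWIDTH handling were dropped entirely (just a possibility at this 
point, not a decision), the directory step would reduce to hard-coded 
64-bit entries, roughly:

    /* Hypothetical 64-bit-only variant, with the PTEWIDTH lines removed */
    index = (address >> dir_base) & ((1ULL << dir_width) - 1);
    phys = base | (index << 3);       /* each pte/dir entry is 8 bytes */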

Regards
Bibo Mao
> 
> 
> Regards
> 
> Miao Hao
> 
>> Regards
>> Bibo Mao
>>>>> I think it should be something like this.
>>>>>        index = (address >> dir_base) & ((1 << (dir_width + shift) - 
>>>>> 1);
>>>>>        phys = base | index << 3;
>>>> Sorry, it should be shift - 3.
>>>>  index = (address >> dir_base) & (1 << (dir_width + shift - 3) - 1);
>>>>  phys = base | index << 3;
>>>>
>>> In my understanding, assignment "index = (address >> dir_base) & ((1 
>>> << dir_width) - 1)" retrieves the corresponding bits in the virtual 
>>> address set in CSR_PWC[L|H] as the index (Figure 5-2 in LoongArch 
>>> specification volume1), and CSR_PWCL.PTEWIDTH pads zeros to retrieved 
>>> bits to align.
>>>
>>> Here's an example, a 3 level page table with 16KB pages in Loongarch 
>>> linux setup:
>>>
>>> virtual address:
>>>
>>> +---------------+---------------+---------------+---------------+
>>>
>>> |46               36|35               25|24 14|13                 0|
>>>
>>> +---------------+---------------+---------------+---------------+
>>>
>>>     dir3_index    dir1_index    pt_index      page_offset
>>>
>>> CSR_PWCL:
>>>
>>> |00|00000|00000|11001|01011|01110|01011|
>>>
>>> CSR_PWCH:
>>>
>>> |0000000|0|000000|000000|100100|001011|
>>>
>>> Note that dir3_index, dir1_index and pt_index are 11 bit and 
>>> CSR_PWCL.PTEWidth=0. To index a 64 bit pte in the 16KB(2^14B) page 
>>> table, we should pad (0 + 3) zeros to pt_index.
>>>
>>>
>>> Regards
>>>
>>> Miao, Hao
>>>
>>>>>
>>>>> Regards
>>>>> Bibo, Mao
>>>>>>
>>>>>>>> +            base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
>>>>>>>> +            if (!FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>>> +                /* mask off page dir permission bits */
>>>>>>>> +                base &= TARGET_PAGE_MASK;
>>>>>>>> +            } else {
>>>>>>>> +                /* base is a huge pte */
>>>>>>>> +                break;
>>>>>>>> +            }
>>>>>>>> +
>>>>>>>> +            if (base == 0) {
>>>>>>> physical adddress 0 is valid and Valid bit will be checked in 
>>>>>>> later. Can we remove this?
>>>>>> the value of base equals to 0 means that the current page 
>>>>>> directory entry does not point to next level page directory, so we 
>>>>>> return here
>>>>>>>> +                return TLBRET_NOMATCH;
>>>>>>>> +            }
>>>>>>>
>>>>>>>> +        }
>>>>>>>> +    }
>>>>>>>> +
>>>>>>>> +    /* pte */
>>>>>>>> +    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>>>>>>>> +        /* Huge Page. base is pte */
>>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
>>>>>>>> +        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
>>>>>>>> +        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
>>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
>>>>>>>> +            base = FIELD_DP64(base, TLBENTRY, G, 1);
>>>>>>>> +        }
>>>>>>>> +    } else {
>>>>>>>> +        /* Normal Page. base points to pte */
>>>>>>>> +        get_dir_base_width(env, &dir_base, &dir_width, 0);
>>>>>>>> +        index = (address >> dir_base) & ((1 << dir_width) - 1);
>>>>>>>> +        phys = base | index << shift;
>>>>>>> Ditto, shift may be wider than 64-bit.
>>>>>>>
>>>>>>> Regards
>>>>>>> Bibo Mao
>>>>>>
>>>>>> Ditto, shift is less than or equal to 6.
>>>>>>
>>>>>>
>>>>>> Regards
>>>>>>
>>>>>> Miao Hao
>>>>>>
>>>>>>>> +        base = ldq_phys(cs->as, phys);
>>>>>>>> +    }
>>>>>>>> +
>>>>>>>> +    /* TODO: check plv and other bits? */
>>>>>>>> +
>>>>>>>> +    /* base is pte, in normal pte format */
>>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, V)) {
>>>>>>>> +        return TLBRET_NOMATCH;
>>>>>>>> +    }
>>>>>>>> +
>>>>>>>> +    if (!FIELD_EX64(base, TLBENTRY, D)) {
>>>>>>>> +        *prot = PAGE_READ;
>>>>>>>> +    } else {
>>>>>>>> +        *prot = PAGE_READ | PAGE_WRITE;
>>>>>>>> +    }
>>>>>>>> +
>>>>>>>> +    /* get TARGET_PAGE_SIZE aligned physical address */
>>>>>>>> +    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
>>>>>>>> +    /* mask RPLV, NX, NR bits */
>>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
>>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
>>>>>>>> +    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
>>>>>>>> +    /* mask other attribute bits */
>>>>>>>> +    *physical = base & TARGET_PAGE_MASK;
>>>>>>>> +
>>>>>>>> +    return 0;
>>>>>>>> +}
>>>>>>>> +
>>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, 
>>>>>>>> hwaddr *physical,
>>>>>>>>                                    int *prot, target_ulong address,
>>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>>> mmu_idx)
>>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>>> mmu_idx,
>>>>>>>> +                                 int is_debug)
>>>>>>>>   {
>>>>>>>>       int index, match;
>>>>>>>>   @@ -151,6 +237,13 @@ static int 
>>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>>       if (match) {
>>>>>>>>           return loongarch_map_tlb_entry(env, physical, prot,
>>>>>>>>                                          address, access_type, 
>>>>>>>> index, mmu_idx);
>>>>>>>> +    } else if (is_debug) {
>>>>>>>> +        /*
>>>>>>>> +         * For debugger memory access, we want to do the map 
>>>>>>>> when there is a
>>>>>>>> +         * legal mapping, even if the mapping is not yet in 
>>>>>>>> TLB. return 0 if
>>>>>>>> +         * there is a valid map, else none zero.
>>>>>>>> +         */
>>>>>>>> +        return loongarch_page_table_walker(env, physical, prot, 
>>>>>>>> address);
>>>>>>>>       }
>>>>>>>>         return TLBRET_NOMATCH;
>>>>>>>> @@ -158,7 +251,8 @@ static int 
>>>>>>>> loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
>>>>>>>>   #else
>>>>>>>>   static int loongarch_map_address(CPULoongArchState *env, 
>>>>>>>> hwaddr *physical,
>>>>>>>>                                    int *prot, target_ulong address,
>>>>>>>> -                                 MMUAccessType access_type, int 
>>>>>>>> mmu_idx)
>>>>>>>> +                                 MMUAccessType access_type, int 
>>>>>>>> mmu_idx,
>>>>>>>> +                                 int is_debug)
>>>>>>>>   {
>>>>>>>>       return TLBRET_NOMATCH;
>>>>>>>>   }
>>>>>>>> @@ -178,7 +272,7 @@ static hwaddr dmw_va2pa(CPULoongArchState 
>>>>>>>> *env, target_ulong va,
>>>>>>>>     int get_physical_address(CPULoongArchState *env, hwaddr 
>>>>>>>> *physical,
>>>>>>>>                            int *prot, target_ulong address,
>>>>>>>> -                         MMUAccessType access_type, int mmu_idx)
>>>>>>>> +                         MMUAccessType access_type, int 
>>>>>>>> mmu_idx, int is_debug)
>>>>>>>>   {
>>>>>>>>       int user_mode = mmu_idx == MMU_USER_IDX;
>>>>>>>>       int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
>>>>>>>> @@ -222,7 +316,7 @@ int get_physical_address(CPULoongArchState 
>>>>>>>> *env, hwaddr *physical,
>>>>>>>>         /* Mapped address */
>>>>>>>>       return loongarch_map_address(env, physical, prot, address,
>>>>>>>> -                                 access_type, mmu_idx);
>>>>>>>> +                                 access_type, mmu_idx, is_debug);
>>>>>>>>   }
>>>>>>>>     hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr 
>>>>>>>> addr)
>>>>>>>> @@ -232,7 +326,7 @@ hwaddr 
>>>>>>>> loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>>>>>>>>       int prot;
>>>>>>>>         if (get_physical_address(env, &phys_addr, &prot, addr, 
>>>>>>>> MMU_DATA_LOAD,
>>>>>>>> -                             cpu_mmu_index(cs, false)) != 0) {
>>>>>>>> +                             cpu_mmu_index(cs, false), 1) != 0) {
>>>>>>>>           return -1;
>>>>>>>>       }
>>>>>>>>       return phys_addr;
>