[PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE

Andrew S. Rightenburg via qemu development posted 2 patches 1 month, 1 week ago
Maintainers: Song Gao <gaosong@loongson.cn>
There is a newer version of this series
[PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE
Posted by Andrew S. Rightenburg via qemu development 1 month, 1 week ago
From: rail5 <andrew@rail5.org>

The LDDIR/LDPTE helpers load a page table entry (or huge page entry)
from guest memory and currently apply the PALEN mask to the whole
64-bit value.

That mask is intended to constrain the physical address bits, but masking
the full entry also clears permission bits in the upper part of the PTE,
including NX (bit 62). As a result, LoongArch TCG can incorrectly allow
instruction fetches from NX mappings when translation is driven through
these helpers.

Mask only the PPN/address field and preserve the rest of the PTE.

This was reported as a bug at:
https://gitlab.com/qemu-project/qemu/-/issues/3319

Fixes: 56599a705f2 ("target/loongarch: Introduce loongarch_palen_mask()")
Cc: qemu-stable@nongnu.org
Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
---
 target/loongarch/tcg/tlb_helper.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index c1dc77a8f8..8747fa2a0f 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -686,6 +686,24 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     cpu_loop_exit_restore(cs, retaddr);
 }
 
+static inline uint64_t loongarch_mask_pte_ppn(CPULoongArchState *env,
+                                              uint64_t pte)
+{
+    uint64_t palen_mask = loongarch_palen_mask(env);
+
+    if (is_la64(env)) {
+        uint64_t ppn_bits = pte & MAKE_64BIT_MASK(12, 36);
+        uint64_t ppn_masked = ppn_bits & palen_mask;
+
+        return (pte & ~MAKE_64BIT_MASK(12, 36)) | ppn_masked;
+    } else {
+        uint64_t ppn_bits = pte & MAKE_64BIT_MASK(8, 24);
+        uint64_t ppn_masked = ppn_bits & palen_mask;
+
+        return (pte & ~MAKE_64BIT_MASK(8, 24)) | ppn_masked;
+    }
+}
+
 target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                           uint32_t level, uint32_t mem_idx)
 {
@@ -721,7 +739,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
     get_dir_base_width(env, &dir_base, &dir_width, level);
     index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
     phys = base | index << 3;
-    return ldq_le_phys(cs->as, phys) & palen_mask;
+    return loongarch_mask_pte_ppn(env, ldq_le_phys(cs->as, phys));
 }
 
 void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
@@ -729,6 +747,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
 {
     CPUState *cs = env_cpu(env);
     hwaddr phys, tmp0, ptindex, ptoffset0, ptoffset1;
+    uint64_t pte_raw;
     uint64_t badv;
     uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
     uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
@@ -744,7 +763,6 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
      * and the other is the huge page entry,
      * whose bit 6 should be 1.
      */
-    base = base & palen_mask;
     if (FIELD_EX64(base, TLBENTRY, HUGE)) {
         /*
          * Gets the huge page level and Gets huge page size.
@@ -768,7 +786,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
          * when loaded into the tlb,
          * so the tlb page size needs to be divided by 2.
          */
-        tmp0 = base;
+        tmp0 = loongarch_mask_pte_ppn(env, base);
         if (odd) {
             tmp0 += MAKE_64BIT_MASK(ps, 1);
         }
@@ -780,12 +798,15 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
     } else {
         badv = env->CSR_TLBRBADV;
 
+        base = base & palen_mask;
+
         ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
         ptindex = ptindex & ~0x1;   /* clear bit 0 */
         ptoffset0 = ptindex << 3;
         ptoffset1 = (ptindex + 1) << 3;
         phys = base | (odd ? ptoffset1 : ptoffset0);
-        tmp0 = ldq_le_phys(cs->as, phys) & palen_mask;
+        pte_raw = ldq_le_phys(cs->as, phys);
+        tmp0 = loongarch_mask_pte_ppn(env, pte_raw);
         ps = ptbase;
     }
 
-- 
2.47.3
Re: [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE
Posted by Bibo Mao 1 month ago

On 2026/3/5 下午9:54, Andrew S. Rightenburg via qemu development wrote:
> From: rail5 <andrew@rail5.org>
> 
> The LDDIR/LDPTE helpers load a page table entry (or huge page entry)
> from guest memory and currently apply the PALEN mask to the whole
> 64-bit value.
> 
> That mask is intended to constrain the physical address bits, but masking
> the full entry also clears permission bits in the upper part of the PTE,
> including NX (bit 62). As a result, LoongArch TCG can incorrectly allow
> instruction fetches from NX mappings when translation is driven through
> these helpers.
Good catch.
Yes, it is actually a problem: the upper HW PTE bits are lost.

> 
> Mask only the PPN/address field and preserve the rest of the PTE.
> 
> This was reported as a bug at:
> https://gitlab.com/qemu-project/qemu/-/issues/3319
> 
> Fixes: 56599a705f2 ("target/loongarch: Introduce loongarch_palen_mask()")
> Cc: qemu-stable@nongnu.org
> Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
> ---
>   target/loongarch/tcg/tlb_helper.c | 29 +++++++++++++++++++++++++----
>   1 file changed, 25 insertions(+), 4 deletions(-)
> 
> diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
> index c1dc77a8f8..8747fa2a0f 100644
> --- a/target/loongarch/tcg/tlb_helper.c
> +++ b/target/loongarch/tcg/tlb_helper.c
> @@ -686,6 +686,24 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>       cpu_loop_exit_restore(cs, retaddr);
>   }
>   
> +static inline uint64_t loongarch_mask_pte_ppn(CPULoongArchState *env,
> +                                              uint64_t pte)
> +{
> +    uint64_t palen_mask = loongarch_palen_mask(env);
> +
> +    if (is_la64(env)) {
> +        uint64_t ppn_bits = pte & MAKE_64BIT_MASK(12, 36);
> +        uint64_t ppn_masked = ppn_bits & palen_mask;
> +
> +        return (pte & ~MAKE_64BIT_MASK(12, 36)) | ppn_masked;
The value 36 is hard-coded; also, I think the software PTE bits should be cleared.
How about adding a HW PTE mask element to the structure CPULoongArchState?

> +    } else {
> +        uint64_t ppn_bits = pte & MAKE_64BIT_MASK(8, 24);
> +        uint64_t ppn_masked = ppn_bits & palen_mask;
> +
> +        return (pte & ~MAKE_64BIT_MASK(8, 24)) | ppn_masked;
> +    }
> +}
> +
>   target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
>                             uint32_t level, uint32_t mem_idx)
>   {
> @@ -721,7 +739,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
>       get_dir_base_width(env, &dir_base, &dir_width, level);
>       index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
>       phys = base | index << 3;
> -    return ldq_le_phys(cs->as, phys) & palen_mask;
> +    return loongarch_mask_pte_ppn(env, ldq_le_phys(cs->as, phys));
I think that it is not needed for the lddir() API: lddir returns the physical
address of the page table without PTE permissions. Only helper_ldpte() needs
modification.

Regards
Bibo Mao

>   }
>   
>   void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> @@ -729,6 +747,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
>   {
>       CPUState *cs = env_cpu(env);
>       hwaddr phys, tmp0, ptindex, ptoffset0, ptoffset1;
> +    uint64_t pte_raw;
>       uint64_t badv;
>       uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
>       uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
> @@ -744,7 +763,6 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
>        * and the other is the huge page entry,
>        * whose bit 6 should be 1.
>        */
> -    base = base & palen_mask;
>       if (FIELD_EX64(base, TLBENTRY, HUGE)) {
>           /*
>            * Gets the huge page level and Gets huge page size.
> @@ -768,7 +786,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
>            * when loaded into the tlb,
>            * so the tlb page size needs to be divided by 2.
>            */
> -        tmp0 = base;
> +        tmp0 = loongarch_mask_pte_ppn(env, base);
>           if (odd) {
>               tmp0 += MAKE_64BIT_MASK(ps, 1);
>           }
> @@ -780,12 +798,15 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
>       } else {
>           badv = env->CSR_TLBRBADV;
>   
> +        base = base & palen_mask;
> +
>           ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
>           ptindex = ptindex & ~0x1;   /* clear bit 0 */
>           ptoffset0 = ptindex << 3;
>           ptoffset1 = (ptindex + 1) << 3;
>           phys = base | (odd ? ptoffset1 : ptoffset0);
> -        tmp0 = ldq_le_phys(cs->as, phys) & palen_mask;
> +        pte_raw = ldq_le_phys(cs->as, phys);
> +        tmp0 = loongarch_mask_pte_ppn(env, pte_raw);
>           ps = ptbase;
>       }
>   
>