[PATCH v2] target/ppc: Fix VRMA page size for ISA v3.0

Nicholas Piggin posted 1 patch 9 months, 1 week ago
Patches applied successfully
git fetch https://github.com/patchew-project/qemu tags/patchew/20230730111842.39292-1-npiggin@gmail.com
Maintainers: Daniel Henrique Barboza <danielhb413@gmail.com>, "Cédric Le Goater" <clg@kaod.org>, David Gibson <david@gibson.dropbear.id.au>, Greg Kurz <groug@kaod.org>, Nicholas Piggin <npiggin@gmail.com>
[PATCH v2] target/ppc: Fix VRMA page size for ISA v3.0
Posted by Nicholas Piggin 9 months, 1 week ago
Until v2.07s, the VRMA page size (L||LP) was encoded in LPCR[VRMASD].
In v3.0 that moved to the partition table PS field.

The powernv machine can now run KVM HPT guests on POWER9/10 CPUs with
this fix and the patch to add ASDR.

Fixes: 3367c62f522b ("target/ppc: Support for POWER9 native hash")
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
Since v1:
- Added llp variable to avoid calling get_vrma_llp twice [Cedric].
- Added some bit defines for architected fields and values [Cedric].

Patches 1 and 3 from the previously posted series are out of the way,
and let's defer the 4-6 decrementer fixes until after 8.1, so this is
the last remaining one from the series.

Thanks,
Nick
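
Not part of the patch, just for reference: a minimal standalone sketch of
the PS -> SLB LLP conversion that get_vrma_llp() below performs, with the
shift values written out numerically on the assumption that QEMU's
PPC_BIT_NR(n) expands to (63 - n):

#include <inttypes.h>
#include <stdio.h>

#define SLB_VSID_L_SHIFT   8   /* PPC_BIT_NR(55), assuming 63 - n numbering */
#define SLB_VSID_LP_SHIFT  4   /* PPC_BIT_NR(59) */

/* Convert the ISA v3.0 PATE0 PS field (3 bits, L||LP) to SLB VSID LLP bits */
static uint64_t ps_to_slb_llp(uint64_t ps)
{
    uint64_t l  = (ps >> 2) & 0x1;   /* high bit of PS is L */
    uint64_t lp = ps & 0x3;          /* low two bits are LP */

    return (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
}

int main(void)
{
    /* e.g. PS = 0b101 (L=1, LP=01) gives 0x110, the SLB 64k encoding */
    printf("llp = 0x%" PRIx64 "\n", ps_to_slb_llp(5));
    return 0;
}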

 target/ppc/mmu-hash64.c | 45 +++++++++++++++++++++++++++++++++++------
 target/ppc/mmu-hash64.h |  5 +++++
 2 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index a0c90df3ce..d645c0bb94 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -874,12 +874,46 @@ static target_ulong rmls_limit(PowerPCCPU *cpu)
     return rma_sizes[rmls];
 }
 
-static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+/* Return the LLP in SLB_VSID format */
+static uint64_t get_vrma_llp(PowerPCCPU *cpu)
 {
     CPUPPCState *env = &cpu->env;
-    target_ulong lpcr = env->spr[SPR_LPCR];
-    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
-    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+    uint64_t llp;
+
+    if (env->mmu_model == POWERPC_MMU_3_00) {
+        ppc_v3_pate_t pate;
+        uint64_t ps, l, lp;
+
+        /*
+         * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
+         * page size (L||LP equivalent) in the PS field in the HPT partition
+         * table entry.
+         */
+        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+            error_report("Bad VRMA with no partition table entry");
+            return 0;
+        }
+        ps = PATE0_GET_PS(pate.dw0);
+        /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
+        l = (ps >> 2) & 0x1;
+        lp = ps & 0x3;
+        llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
+
+    } else {
+        uint64_t lpcr = env->spr[SPR_LPCR];
+        target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+
+        /* VRMASD LLP matches SLB format, just shift and mask it */
+        llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
+    }
+
+    return llp;
+}
+
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+    uint64_t llp = get_vrma_llp(cpu);
+    target_ulong vsid = SLB_VSID_VRMA | llp;
     int i;
 
     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
@@ -897,8 +931,7 @@ static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
         }
     }
 
-    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
-                 TARGET_FMT_lx, lpcr);
+    error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);
 
     return -1;
 }
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
index 1496955d38..de653fcae5 100644
--- a/target/ppc/mmu-hash64.h
+++ b/target/ppc/mmu-hash64.h
@@ -41,8 +41,10 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
 #define SLB_VSID_KP             0x0000000000000400ULL
 #define SLB_VSID_N              0x0000000000000200ULL /* no-execute */
 #define SLB_VSID_L              0x0000000000000100ULL
+#define SLB_VSID_L_SHIFT        PPC_BIT_NR(55)
 #define SLB_VSID_C              0x0000000000000080ULL /* class */
 #define SLB_VSID_LP             0x0000000000000030ULL
+#define SLB_VSID_LP_SHIFT       PPC_BIT_NR(59)
 #define SLB_VSID_ATTR           0x0000000000000FFFULL
 #define SLB_VSID_LLP_MASK       (SLB_VSID_L | SLB_VSID_LP)
 #define SLB_VSID_4K             0x0000000000000000ULL
@@ -58,6 +60,9 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
 #define SDR_64_HTABSIZE        0x000000000000001FULL
 
 #define PATE0_HTABORG           0x0FFFFFFFFFFC0000ULL
+#define PATE0_PS                PPC_BITMASK(56, 58)
+#define PATE0_GET_PS(dw0)       (((dw0) & PATE0_PS) >> PPC_BIT_NR(58))
+
 #define HPTES_PER_GROUP         8
 #define HASH_PTE_SIZE_64        16
 #define HASH_PTEG_SIZE_64       (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
-- 
2.40.1
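
A side note on the new header defines, illustrative only and not part of
the patch: a quick numeric sanity check of the values they resolve to,
assuming the PPC_BIT_NR()/PPC_BITMASK() helpers below mirror QEMU's
definitions (IBM bit numbering over a 64-bit doubleword,
PPC_BIT_NR(n) == 63 - n):

#include <assert.h>
#include <stdint.h>

#define PPC_BIT_NR(bit)     (63 - (bit))
#define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
    assert(PPC_BIT_NR(55) == 8);             /* SLB_VSID_L_SHIFT  -> bit 8 (0x100) */
    assert(PPC_BIT_NR(59) == 4);             /* SLB_VSID_LP_SHIFT -> bits 4-5 (0x30) */
    assert(PPC_BITMASK(56, 58) == 0xE0ULL);  /* PATE0_PS mask */
    assert((PPC_BITMASK(56, 58) >> PPC_BIT_NR(58)) == 0x7ULL); /* PATE0_GET_PS width */
    return 0;
}
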
Re: [PATCH v2] target/ppc: Fix VRMA page size for ISA v3.0
Posted by Daniel Henrique Barboza 9 months ago
Queued in gitlab.com/danielhb/qemu/tree/ppc-next. Thanks,


Daniel

On 7/30/23 08:18, Nicholas Piggin wrote:
> Until v2.07s, the VRMA page size (L||LP) was encoded in LPCR[VRMASD].
> In v3.0 that moved to the partition table PS field.
> [...]
Re: [PATCH v2] target/ppc: Fix VRMA page size for ISA v3.0
Posted by Cédric Le Goater 9 months, 1 week ago
On 7/30/23 13:18, Nicholas Piggin wrote:
> Until v2.07s, the VRMA page size (L||LP) was encoded in LPCR[VRMASD].
> In v3.0 that moved to the partition table PS field.
> 
> The powernv machine can now run KVM HPT guests on POWER9/10 CPUs with
> this fix and the patch to add ASDR.
> 
> Fixes: 3367c62f522b ("target/ppc: Support for POWER9 native hash")
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> Since v1:
> - Added llp variable to avoid calling get_vrma_llp twice [Cedric].
> - Added some bit defines for architected fields and values [Cedric].

Thanks,


Reviewed-by: Cédric Le Goater <clg@kaod.org>

C.

