[PATCH v4 2/2] target/riscv: Make PMP region count configurable

Jay Chang posted 2 patches 5 months, 3 weeks ago
Maintainers: Palmer Dabbelt <palmer@dabbelt.com>, Alistair Francis <alistair.francis@wdc.com>, Weiwei Li <liwei1518@gmail.com>, Daniel Henrique Barboza <dbarboza@ventanamicro.com>, Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
Posted by Jay Chang 5 months, 3 weeks ago
Previously, the number of PMP regions was hardcoded to 16 in QEMU.
This patch replaces the fixed value with a new `pmp_regions` field,
allowing platforms to configure the number of PMP regions.

If no specific value is provided, the default number of PMP regions
remains 16 to preserve the existing behavior.

A new CPU parameter num-pmp-regions has been introduced to the QEMU
command line. For example:

	-cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
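
For illustration only (not part of this patch), a complete invocation on
the "virt" machine could look like the following, where the -kernel path
is a placeholder:

	qemu-system-riscv64 -M virt -nographic \
		-cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8 \
		-kernel /path/to/Image

Boards that create the CPU object directly could set the same property
through QOM, e.g. object_property_set_uint(OBJECT(cpu),
"num-pmp-regions", 8, &error_fatal).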

Signed-off-by: Jay Chang <jay.chang@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
---
 target/riscv/cpu.c                | 54 +++++++++++++++++++++++++++++--
 target/riscv/cpu.h                |  3 +-
 target/riscv/cpu_cfg_fields.h.inc |  1 +
 target/riscv/csr.c                |  5 ++-
 target/riscv/machine.c            |  3 +-
 target/riscv/pmp.c                | 28 ++++++++++------
 6 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 629ac37501..8e32252c11 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -1117,6 +1117,7 @@ static void riscv_cpu_init(Object *obj)
     cpu->cfg.cbom_blocksize = 64;
     cpu->cfg.cbop_blocksize = 64;
     cpu->cfg.cboz_blocksize = 64;
+    cpu->cfg.pmp_regions = 16;
     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
     cpu->cfg.max_satp_mode = -1;
 
@@ -1568,6 +1569,46 @@ static const PropertyInfo prop_pmp = {
     .set = prop_pmp_set,
 };
 
+static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
+                                     void *opaque, Error **errp)
+{
+    RISCVCPU *cpu = RISCV_CPU(obj);
+    uint8_t value;
+
+    visit_type_uint8(v, name, &value, errp);
+
+    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
+        cpu_set_prop_err(cpu, name, errp);
+        return;
+    }
+
+    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) {
+        error_setg(errp, "Number of PMP regions exceeds maximum available");
+        return;
+    } else if (value > MAX_RISCV_PMPS) {
+        error_setg(errp, "Number of PMP regions exceeds maximum available");
+        return;
+    }
+
+    cpu_option_add_user_setting(name, value);
+    cpu->cfg.pmp_regions = value;
+}
+
+static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
+                                     void *opaque, Error **errp)
+{
+    uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions;
+
+    visit_type_uint8(v, name, &value, errp);
+}
+
+static const PropertyInfo prop_num_pmp_regions = {
+    .type = "uint8",
+    .description = "num-pmp-regions",
+    .get = prop_num_pmp_regions_get,
+    .set = prop_num_pmp_regions_set,
+};
+
 static int priv_spec_from_str(const char *priv_spec_str)
 {
     int priv_version = -1;
@@ -2567,6 +2608,7 @@ static const Property riscv_cpu_properties[] = {
 
     {.name = "mmu", .info = &prop_mmu},
     {.name = "pmp", .info = &prop_pmp},
+    {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
 
     {.name = "priv_spec", .info = &prop_priv_spec},
     {.name = "vext_spec", .info = &prop_vext_spec},
@@ -2891,6 +2933,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
     DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
         .cfg.mmu = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
         .priv_spec = PRIV_VERSION_LATEST,
     ),
 
@@ -2937,7 +2980,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.max_satp_mode = VM_1_10_MBARE,
         .cfg.ext_zifencei = true,
         .cfg.ext_zicsr = true,
-        .cfg.pmp = true
+        .cfg.pmp = true,
+        .cfg.pmp_regions = 8
     ),
 
     DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
@@ -2948,7 +2992,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.ext_zifencei = true,
         .cfg.ext_zicsr = true,
         .cfg.mmu = true,
-        .cfg.pmp = true
+        .cfg.pmp = true,
+        .cfg.pmp_regions = 8
     ),
 
 #if defined(TARGET_RISCV32) || \
@@ -2966,6 +3011,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.ext_zifencei = true,
         .cfg.ext_zicsr = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
         .cfg.ext_smepmp = true,
 
         .cfg.ext_zba = true,
@@ -3040,6 +3086,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.ext_xtheadmempair = true,
         .cfg.ext_xtheadsync = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
 
         .cfg.mvendorid = THEAD_VENDOR_ID,
 
@@ -3063,6 +3110,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.rvv_ta_all_1s = true,
         .cfg.misa_w = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
         .cfg.cbom_blocksize = 64,
         .cfg.cbop_blocksize = 64,
         .cfg.cboz_blocksize = 64,
@@ -3119,6 +3167,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
         .cfg.ext_zifencei = true,
         .cfg.ext_zicsr = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
         .cfg.ext_zicbom = true,
         .cfg.cbom_blocksize = 64,
         .cfg.cboz_blocksize = 64,
@@ -3163,6 +3212,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
 
         .cfg.mmu = true,
         .cfg.pmp = true,
+        .cfg.pmp_regions = 8,
 
         .cfg.max_satp_mode = VM_1_10_SV39,
     ),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 229ade9ed9..67323a7d9d 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -159,7 +159,8 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
 
 #define MMU_USER_IDX 3
 
-#define MAX_RISCV_PMPS (16)
+#define MAX_RISCV_PMPS (64)
+#define OLD_MAX_RISCV_PMPS (16)
 
 #if !defined(CONFIG_USER_ONLY)
 #include "pmp.h"
diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
index 59f134a419..33c4f9bac8 100644
--- a/target/riscv/cpu_cfg_fields.h.inc
+++ b/target/riscv/cpu_cfg_fields.h.inc
@@ -163,6 +163,7 @@ TYPED_FIELD(uint16_t, elen, 0)
 TYPED_FIELD(uint16_t, cbom_blocksize, 0)
 TYPED_FIELD(uint16_t, cbop_blocksize, 0)
 TYPED_FIELD(uint16_t, cboz_blocksize, 0)
+TYPED_FIELD(uint8_t,  pmp_regions, 0)
 
 TYPED_FIELD(int8_t, max_satp_mode, -1)
 
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index d6cd441133..6296ecd1e1 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -738,7 +738,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
 static RISCVException pmp(CPURISCVState *env, int csrno)
 {
     if (riscv_cpu_cfg(env)->pmp) {
-        if (csrno <= CSR_PMPCFG3) {
+        int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
+                          CSR_PMPCFG15 : CSR_PMPCFG3;
+
+        if (csrno <= max_pmpcfg) {
             uint32_t reg_index = csrno - CSR_PMPCFG0;
 
             /* TODO: RV128 restriction check */
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index c97e9ce9df..1600ec44f0 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
     RISCVCPU *cpu = opaque;
     CPURISCVState *env = &cpu->env;
     int i;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         pmp_update_rule_addr(env, i);
     }
     pmp_update_rule_nums(env);
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 5af295e410..3540327c9a 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -122,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
  */
 static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (pmp_index < pmp_regions) {
         return env->pmp_state.pmp[pmp_index].cfg_reg;
     }
 
@@ -136,7 +138,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
  */
 static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (pmp_index < pmp_regions) {
         if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
             /* no change */
             return false;
@@ -236,9 +240,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
 void pmp_update_rule_nums(CPURISCVState *env)
 {
     int i;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     env->pmp_state.num_rules = 0;
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         const uint8_t a_field =
             pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
         if (PMP_AMATCH_OFF != a_field) {
@@ -332,6 +337,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
     int pmp_size = 0;
     hwaddr s = 0;
     hwaddr e = 0;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /* Short cut if no rules */
     if (0 == pmp_get_num_rules(env)) {
@@ -356,7 +362,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
      * 1.10 draft priv spec states there is an implicit order
      * from low to high
      */
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         s = pmp_is_in_range(env, i, addr);
         e = pmp_is_in_range(env, i, addr + pmp_size - 1);
 
@@ -527,8 +533,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 {
     trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
     bool is_next_cfg_tor = false;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    if (addr_index < MAX_RISCV_PMPS) {
+    if (addr_index < pmp_regions) {
         if (env->pmp_state.pmp[addr_index].addr_reg == val) {
             /* no change */
             return;
@@ -538,7 +545,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
          * In TOR mode, need to check the lock bit of the next pmp
          * (if there is a next).
          */
-        if (addr_index + 1 < MAX_RISCV_PMPS) {
+        if (addr_index + 1 < pmp_regions) {
             uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
             is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
 
@@ -573,8 +580,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
 {
     target_ulong val = 0;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    if (addr_index < MAX_RISCV_PMPS) {
+    if (addr_index < pmp_regions) {
         val = env->pmp_state.pmp[addr_index].addr_reg;
         trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
     } else {
@@ -592,6 +600,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 {
     int i;
     uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
     /* Update PMM field only if the value is valid according to Zjpm v1.0 */
     if (riscv_cpu_cfg(env)->ext_smmpm &&
         riscv_cpu_mxl(env) == MXL_RV64 &&
@@ -603,7 +612,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 
     /* RLB cannot be enabled if it's already 0 and if any regions are locked */
     if (!MSECCFG_RLB_ISSET(env)) {
-        for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        for (i = 0; i < pmp_regions; i++) {
             if (pmp_is_locked(env, i)) {
                 val &= ~MSECCFG_RLB;
                 break;
@@ -659,6 +668,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
     hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
     hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
     int i;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /*
      * If PMP is not supported or there are no PMP rules, the TLB page will not
@@ -669,7 +679,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
         return TARGET_PAGE_SIZE;
     }
 
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
             continue;
         }
-- 
2.48.1
Re: [PATCH v4 2/2] target/riscv: Make PMP region count configurable
Posted by Alistair Francis 5 months, 2 weeks ago
On Thu, May 22, 2025 at 6:14 PM Jay Chang <jay.chang@sifive.com> wrote:
>
> Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> This patch replaces the fixed value with a new `pmp_regions` field,
> allowing platforms to configure the number of PMP regions.
>
> If no specific value is provided, the default number of PMP regions
> remains 16 to preserve the existing behavior.
>
> A new CPU parameter num-pmp-regions has been introduced to the QEMU
> command line. For example:
>
>         -cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair
