Previously, the number of PMP regions was hardcoded to 16 in QEMU.
This patch replaces the fixed value with a new `pmp_regions` field,
allowing platforms to configure the number of PMP regions.
If no specific value is provided, the default number of PMP regions
remains 16 to preserve the existing behavior.
A new CPU parameter num-pmp-regions has been introduced to the QEMU
command line. For example:
-cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Signed-off-by: Jay Chang <jay.chang@sifive.com>
---
target/riscv/cpu.c | 54 +++++++++++++++++++++++++++++--
target/riscv/cpu.h | 2 +-
target/riscv/cpu_cfg_fields.h.inc | 1 +
target/riscv/csr.c | 5 ++-
target/riscv/machine.c | 3 +-
target/riscv/pmp.c | 28 ++++++++++------
6 files changed, 79 insertions(+), 14 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index ee20bd7ca2..9cab08f9df 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -1123,6 +1123,7 @@ static void riscv_cpu_init(Object *obj)
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
+ cpu->cfg.pmp_regions = 16;
cpu->env.vext_ver = VEXT_VERSION_1_00_0;
cpu->cfg.max_satp_mode = -1;
@@ -1574,6 +1575,46 @@ static const PropertyInfo prop_pmp = {
.set = prop_pmp_set,
};
+static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint16_t value;
+
+ visit_type_uint16(v, name, &value, errp);
+
+ if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
+ cpu_set_prop_err(cpu, name, errp);
+ return;
+ }
+
+ if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
+ error_setg(errp, "Number of PMP regions exceeds maximum available");
+ return;
+ } else if (value > 64) {
+ error_setg(errp, "Number of PMP regions exceeds maximum available");
+ return;
+ }
+
+ cpu_option_add_user_setting(name, value);
+ cpu->cfg.pmp_regions = value;
+}
+
+static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
+
+ visit_type_uint16(v, name, &value, errp);
+}
+
+static const PropertyInfo prop_num_pmp_regions = {
+ .type = "uint16",
+ .description = "num-pmp-regions",
+ .get = prop_num_pmp_regions_get,
+ .set = prop_num_pmp_regions_set,
+};
+
static int priv_spec_from_str(const char *priv_spec_str)
{
int priv_version = -1;
@@ -2573,6 +2614,7 @@ static const Property riscv_cpu_properties[] = {
{.name = "mmu", .info = &prop_mmu},
{.name = "pmp", .info = &prop_pmp},
+ {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
{.name = "priv_spec", .info = &prop_priv_spec},
{.name = "vext_spec", .info = &prop_vext_spec},
@@ -2895,6 +2937,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
.cfg.mmu = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.priv_spec = PRIV_VERSION_LATEST,
),
@@ -2941,7 +2984,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.max_satp_mode = VM_1_10_MBARE,
.cfg.ext_zifencei = true,
.cfg.ext_zicsr = true,
- .cfg.pmp = true
+ .cfg.pmp = true,
+ .cfg.pmp_regions = 8
),
DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
@@ -2952,7 +2996,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.ext_zifencei = true,
.cfg.ext_zicsr = true,
.cfg.mmu = true,
- .cfg.pmp = true
+ .cfg.pmp = true,
+ .cfg.pmp_regions = 8
),
#if defined(TARGET_RISCV32) || \
@@ -2970,6 +3015,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.ext_zifencei = true,
.cfg.ext_zicsr = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.cfg.ext_smepmp = true,
.cfg.ext_zba = true,
@@ -3044,6 +3090,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.ext_xtheadmempair = true,
.cfg.ext_xtheadsync = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.cfg.mvendorid = THEAD_VENDOR_ID,
@@ -3067,6 +3114,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.rvv_ta_all_1s = true,
.cfg.misa_w = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.cfg.cbom_blocksize = 64,
.cfg.cbop_blocksize = 64,
.cfg.cboz_blocksize = 64,
@@ -3123,6 +3171,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.ext_zifencei = true,
.cfg.ext_zicsr = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.cfg.ext_zicbom = true,
.cfg.cbom_blocksize = 64,
.cfg.cboz_blocksize = 64,
@@ -3167,6 +3216,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.cfg.mmu = true,
.cfg.pmp = true,
+ .cfg.pmp_regions = 8,
.cfg.max_satp_mode = VM_1_10_SV39,
),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 679f417336..de4517c4f8 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
#define MMU_USER_IDX 3
-#define MAX_RISCV_PMPS (16)
+#define MAX_RISCV_PMPS (64)
#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
index 59f134a419..d80eb1eb7b 100644
--- a/target/riscv/cpu_cfg_fields.h.inc
+++ b/target/riscv/cpu_cfg_fields.h.inc
@@ -163,6 +163,7 @@ TYPED_FIELD(uint16_t, elen, 0)
TYPED_FIELD(uint16_t, cbom_blocksize, 0)
TYPED_FIELD(uint16_t, cbop_blocksize, 0)
TYPED_FIELD(uint16_t, cboz_blocksize, 0)
+TYPED_FIELD(uint16_t, pmp_regions, 0)
TYPED_FIELD(int8_t, max_satp_mode, -1)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 41cf469615..daaef8c438 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
static RISCVException pmp(CPURISCVState *env, int csrno)
{
if (riscv_cpu_cfg(env)->pmp) {
- if (csrno <= CSR_PMPCFG3) {
+ uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
+ CSR_PMPCFG15 : CSR_PMPCFG3;
+
+ if (csrno <= MAX_PMPCFG) {
uint32_t reg_index = csrno - CSR_PMPCFG0;
/* TODO: RV128 restriction check */
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index df2d5bad8d..e9a179ae55 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
RISCVCPU *cpu = opaque;
CPURISCVState *env = &cpu->env;
int i;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ for (i = 0; i < pmp_regions; i++) {
pmp_update_rule_addr(env, i);
}
pmp_update_rule_nums(env);
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index c685f7f2c5..3439295d41 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
*/
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
- if (pmp_index < MAX_RISCV_PMPS) {
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+ if (pmp_index < pmp_regions) {
return env->pmp_state.pmp[pmp_index].cfg_reg;
}
@@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
*/
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
- if (pmp_index < MAX_RISCV_PMPS) {
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+ if (pmp_index < pmp_regions) {
if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
/* no change */
return false;
@@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
void pmp_update_rule_nums(CPURISCVState *env)
{
int i;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
env->pmp_state.num_rules = 0;
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ for (i = 0; i < pmp_regions; i++) {
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
if (PMP_AMATCH_OFF != a_field) {
@@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
int pmp_size = 0;
hwaddr s = 0;
hwaddr e = 0;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
@@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
* 1.10 draft priv spec states there is an implicit order
* from low to high
*/
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ for (i = 0; i < pmp_regions; i++) {
s = pmp_is_in_range(env, i, addr);
e = pmp_is_in_range(env, i, addr + pmp_size - 1);
@@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
{
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
bool is_next_cfg_tor = false;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
- if (addr_index < MAX_RISCV_PMPS) {
+ if (addr_index < pmp_regions) {
if (env->pmp_state.pmp[addr_index].addr_reg == val) {
/* no change */
return;
@@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next).
*/
- if (addr_index + 1 < MAX_RISCV_PMPS) {
+ if (addr_index + 1 < pmp_regions) {
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
@@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
target_ulong val = 0;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
- if (addr_index < MAX_RISCV_PMPS) {
+ if (addr_index < pmp_regions) {
val = env->pmp_state.pmp[addr_index].addr_reg;
trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
} else {
@@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
int i;
uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
/* Update PMM field only if the value is valid according to Zjpm v1.0 */
if (riscv_cpu_cfg(env)->ext_smmpm &&
riscv_cpu_mxl(env) == MXL_RV64 &&
@@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
/* RLB cannot be enabled if it's already 0 and if any regions are locked */
if (!MSECCFG_RLB_ISSET(env)) {
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ for (i = 0; i < pmp_regions; i++) {
if (pmp_is_locked(env, i)) {
val &= ~MSECCFG_RLB;
break;
@@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
int i;
+ uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
/*
* If PMP is not supported or there are no PMP rules, the TLB page will not
@@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
return TARGET_PAGE_SIZE;
}
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ for (i = 0; i < pmp_regions; i++) {
if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
continue;
}
--
2.48.1
On Fri, Apr 25, 2025 at 7:46 PM Jay Chang <jay.chang@sifive.com> wrote:
>
> Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> This patch replaces the fixed value with a new `pmp_regions` field,
> allowing platforms to configure the number of PMP regions.
>
> If no specific value is provided, the default number of PMP regions
> remains 16 to preserve the existing behavior.
>
> A new CPU parameter num-pmp-regions has been introduced to the QEMU
> command line. For example:
>
> -cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> ---
> target/riscv/cpu.c | 54 +++++++++++++++++++++++++++++--
> target/riscv/cpu.h | 2 +-
> target/riscv/cpu_cfg_fields.h.inc | 1 +
> target/riscv/csr.c | 5 ++-
> target/riscv/machine.c | 3 +-
> target/riscv/pmp.c | 28 ++++++++++------
> 6 files changed, 79 insertions(+), 14 deletions(-)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index ee20bd7ca2..9cab08f9df 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -1123,6 +1123,7 @@ static void riscv_cpu_init(Object *obj)
> cpu->cfg.cbom_blocksize = 64;
> cpu->cfg.cbop_blocksize = 64;
> cpu->cfg.cboz_blocksize = 64;
> + cpu->cfg.pmp_regions = 16;
> cpu->env.vext_ver = VEXT_VERSION_1_00_0;
> cpu->cfg.max_satp_mode = -1;
>
> @@ -1574,6 +1575,46 @@ static const PropertyInfo prop_pmp = {
> .set = prop_pmp_set,
> };
>
> +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
> + void *opaque, Error **errp)
> +{
> + RISCVCPU *cpu = RISCV_CPU(obj);
> + uint16_t value;
> +
> + visit_type_uint16(v, name, &value, errp);
> +
> + if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
> + cpu_set_prop_err(cpu, name, errp);
> + return;
> + }
> +
> + if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
This 16 should be a macro
> + error_setg(errp, "Number of PMP regions exceeds maximum available");
> + return;
> + } else if (value > 64) {
Same here
> + error_setg(errp, "Number of PMP regions exceeds maximum available");
> + return;
> + }
> +
> + cpu_option_add_user_setting(name, value);
> + cpu->cfg.pmp_regions = value;
> +}
> +
> +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
> + void *opaque, Error **errp)
> +{
> + uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
> +
> + visit_type_uint16(v, name, &value, errp);
> +}
> +
> +static const PropertyInfo prop_num_pmp_regions = {
> + .type = "uint16",
Should this be uint8 instead? The maximum of 64 regions fits in 8 bits.
> + .description = "num-pmp-regions",
> + .get = prop_num_pmp_regions_get,
> + .set = prop_num_pmp_regions_set,
> +};
> +
> static int priv_spec_from_str(const char *priv_spec_str)
> {
> int priv_version = -1;
> @@ -2573,6 +2614,7 @@ static const Property riscv_cpu_properties[] = {
>
> {.name = "mmu", .info = &prop_mmu},
> {.name = "pmp", .info = &prop_pmp},
> + {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
>
> {.name = "priv_spec", .info = &prop_priv_spec},
> {.name = "vext_spec", .info = &prop_vext_spec},
> @@ -2895,6 +2937,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
> .cfg.mmu = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
> .priv_spec = PRIV_VERSION_LATEST,
> ),
>
> @@ -2941,7 +2984,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.max_satp_mode = VM_1_10_MBARE,
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> - .cfg.pmp = true
> + .cfg.pmp = true,
> + .cfg.pmp_regions = 8
> ),
>
> DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
> @@ -2952,7 +2996,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> .cfg.mmu = true,
> - .cfg.pmp = true
> + .cfg.pmp = true,
> + .cfg.pmp_regions = 8
> ),
>
> #if defined(TARGET_RISCV32) || \
> @@ -2970,6 +3015,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
> .cfg.ext_smepmp = true,
>
> .cfg.ext_zba = true,
> @@ -3044,6 +3090,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.ext_xtheadmempair = true,
> .cfg.ext_xtheadsync = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
>
> .cfg.mvendorid = THEAD_VENDOR_ID,
>
> @@ -3067,6 +3114,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.rvv_ta_all_1s = true,
> .cfg.misa_w = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
> .cfg.cbom_blocksize = 64,
> .cfg.cbop_blocksize = 64,
> .cfg.cboz_blocksize = 64,
> @@ -3123,6 +3171,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
> .cfg.ext_zicbom = true,
> .cfg.cbom_blocksize = 64,
> .cfg.cboz_blocksize = 64,
> @@ -3167,6 +3216,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
>
> .cfg.mmu = true,
> .cfg.pmp = true,
> + .cfg.pmp_regions = 8,
>
> .cfg.max_satp_mode = VM_1_10_SV39,
> ),
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 679f417336..de4517c4f8 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
>
> #define MMU_USER_IDX 3
>
> -#define MAX_RISCV_PMPS (16)
> +#define MAX_RISCV_PMPS (64)
>
> #if !defined(CONFIG_USER_ONLY)
> #include "pmp.h"
> diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
> index 59f134a419..d80eb1eb7b 100644
> --- a/target/riscv/cpu_cfg_fields.h.inc
> +++ b/target/riscv/cpu_cfg_fields.h.inc
> @@ -163,6 +163,7 @@ TYPED_FIELD(uint16_t, elen, 0)
> TYPED_FIELD(uint16_t, cbom_blocksize, 0)
> TYPED_FIELD(uint16_t, cbop_blocksize, 0)
> TYPED_FIELD(uint16_t, cboz_blocksize, 0)
> +TYPED_FIELD(uint16_t, pmp_regions, 0)
>
> TYPED_FIELD(int8_t, max_satp_mode, -1)
>
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 41cf469615..daaef8c438 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
> static RISCVException pmp(CPURISCVState *env, int csrno)
> {
> if (riscv_cpu_cfg(env)->pmp) {
> - if (csrno <= CSR_PMPCFG3) {
> + uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
> ++ CSR_PMPCFG15 : CSR_PMPCFG3;
MAX_PMPCFG isn't a macro, so it shouldn't be all caps.
Alistair
> +
> + if (csrno <= MAX_PMPCFG) {
> uint32_t reg_index = csrno - CSR_PMPCFG0;
>
> /* TODO: RV128 restriction check */
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index df2d5bad8d..e9a179ae55 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
> RISCVCPU *cpu = opaque;
> CPURISCVState *env = &cpu->env;
> int i;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> pmp_update_rule_addr(env, i);
> }
> pmp_update_rule_nums(env);
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index c685f7f2c5..3439295d41 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
> */
> static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> {
> - if (pmp_index < MAX_RISCV_PMPS) {
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> + if (pmp_index < pmp_regions) {
> return env->pmp_state.pmp[pmp_index].cfg_reg;
> }
>
> @@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> */
> static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
> {
> - if (pmp_index < MAX_RISCV_PMPS) {
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> + if (pmp_index < pmp_regions) {
> if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
> /* no change */
> return false;
> @@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
> void pmp_update_rule_nums(CPURISCVState *env)
> {
> int i;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> env->pmp_state.num_rules = 0;
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> const uint8_t a_field =
> pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
> if (PMP_AMATCH_OFF != a_field) {
> @@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
> int pmp_size = 0;
> hwaddr s = 0;
> hwaddr e = 0;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> /* Short cut if no rules */
> if (0 == pmp_get_num_rules(env)) {
> @@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
> * 1.10 draft priv spec states there is an implicit order
> * from low to high
> */
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> s = pmp_is_in_range(env, i, addr);
> e = pmp_is_in_range(env, i, addr + pmp_size - 1);
>
> @@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> {
> trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
> bool is_next_cfg_tor = false;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - if (addr_index < MAX_RISCV_PMPS) {
> + if (addr_index < pmp_regions) {
> if (env->pmp_state.pmp[addr_index].addr_reg == val) {
> /* no change */
> return;
> @@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> * In TOR mode, need to check the lock bit of the next pmp
> * (if there is a next).
> */
> - if (addr_index + 1 < MAX_RISCV_PMPS) {
> + if (addr_index + 1 < pmp_regions) {
> uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
> is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
>
> @@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
> {
> target_ulong val = 0;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - if (addr_index < MAX_RISCV_PMPS) {
> + if (addr_index < pmp_regions) {
> val = env->pmp_state.pmp[addr_index].addr_reg;
> trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
> } else {
> @@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
> {
> int i;
> uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> /* Update PMM field only if the value is valid according to Zjpm v1.0 */
> if (riscv_cpu_cfg(env)->ext_smmpm &&
> riscv_cpu_mxl(env) == MXL_RV64 &&
> @@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>
> /* RLB cannot be enabled if it's already 0 and if any regions are locked */
> if (!MSECCFG_RLB_ISSET(env)) {
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> if (pmp_is_locked(env, i)) {
> val &= ~MSECCFG_RLB;
> break;
> @@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
> hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
> hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
> int i;
> + uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> /*
> * If PMP is not supported or there are no PMP rules, the TLB page will not
> @@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
> return TARGET_PAGE_SIZE;
> }
>
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
> continue;
> }
> --
> 2.48.1
>
>
© 2016 - 2025 Red Hat, Inc.