According to version 20250508 of the privileged specification, mcyclecfg
is a 64-bit register and mcyclecfgh refers to the top 32 bits of this
register when XLEN == 32. No real advantage is gained by keeping
them separate, and combining them allows for a slight simplification.
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/riscv/cpu.h | 3 +--
target/riscv/csr.c | 28 +++++++++++++++++-----------
2 files changed, 18 insertions(+), 13 deletions(-)
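Note (not part of the patch): a minimal standalone sketch of the split used
below, where one uint64_t backs both the mcyclecfg and mcyclecfgh views. The
extract64()/deposit64() functions here are local stand-ins mirroring the
helpers from qemu/bitops.h so the snippet compiles on its own.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring extract64()/deposit64() from qemu/bitops.h. */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

static uint64_t deposit64(uint64_t value, int start, int length,
                          uint64_t fieldval)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;

    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    uint64_t mcyclecfg = 0;

    /* An RV32 write to mcyclecfg only touches bits [31:0]... */
    mcyclecfg = deposit64(mcyclecfg, 0, 32, 0x11223344u);
    /* ...and a write to mcyclecfgh only touches bits [63:32]. */
    mcyclecfg = deposit64(mcyclecfg, 32, 32, 0xaabbccddu);

    /* Each CSR read sees its own half, untouched by the other write. */
    assert(extract64(mcyclecfg, 0, 32) == 0x11223344u);
    assert(extract64(mcyclecfg, 32, 32) == 0xaabbccddu);

    printf("mcyclecfg = 0x%016" PRIx64 "\n", mcyclecfg);
    return 0;
}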
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 674a800d2f..a43d9c6b5b 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -419,8 +419,7 @@ struct CPUArchState {
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
- target_ulong mcyclecfg;
- target_ulong mcyclecfgh;
+ uint64_t mcyclecfg;
target_ulong minstretcfg;
target_ulong minstretcfgh;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 55110b4b66..ddd80ab68d 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -1062,7 +1062,8 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->mcyclecfg;
+ bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
+ *val = extract64(env->mcyclecfg, 0, rv32 ? 32 : 64);
return RISCV_EXCP_NONE;
}
@@ -1072,7 +1073,7 @@ static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
uint64_t inh_avail_mask;
if (riscv_cpu_mxl(env) == MXL_RV32) {
- env->mcyclecfg = val;
+ env->mcyclecfg = deposit64(env->mcyclecfg, 0, 32, val);
} else {
/* Set xINH fields if priv mode supported */
inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
@@ -1091,7 +1092,7 @@ static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->mcyclecfgh;
+ *val = extract64(env->mcyclecfg, 32, 32);
return RISCV_EXCP_NONE;
}
@@ -1109,7 +1110,7 @@ static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
inh_avail_mask |= (riscv_has_ext(env, RVH) &&
riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;
- env->mcyclecfgh = val & inh_avail_mask;
+ env->mcyclecfg = deposit64(env->mcyclecfg, 32, 32, val & inh_avail_mask);
return RISCV_EXCP_NONE;
}
@@ -1248,8 +1249,7 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
g_assert(rv32 || !upper_half);
if (counter_idx == 0) {
- cfg_val = rv32 ? ((uint64_t)env->mcyclecfgh << 32) :
- env->mcyclecfg;
+ cfg_val = env->mcyclecfg;
} else if (counter_idx == 2) {
cfg_val = rv32 ? ((uint64_t)env->minstretcfgh << 32) :
env->minstretcfg;
@@ -1523,8 +1523,12 @@ static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
}
static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
- target_ulong new_val, target_ulong wr_mask)
+ target_ulong new_val, uint64_t wr_mask)
{
+ /*
+ * wr_mask is 64-bit so upper 32 bits of mcyclecfg and minstretcfg
+ * are retained.
+ */
switch (cfg_index) {
case 0: /* CYCLECFG */
if (wr_mask) {
@@ -1550,8 +1554,9 @@ static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
}
static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
- target_ulong new_val, target_ulong wr_mask)
+ target_ulong new_val, target_ulong wr_mask)
{
+ uint64_t cfgh;
if (riscv_cpu_mxl(env) != MXL_RV32) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -1559,12 +1564,13 @@ static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
switch (cfg_index) {
case 0: /* CYCLECFGH */
+ cfgh = extract64(env->mcyclecfg, 32, 32);
if (wr_mask) {
wr_mask &= ~MCYCLECFGH_BIT_MINH;
- env->mcyclecfgh = (new_val & wr_mask) |
- (env->mcyclecfgh & ~wr_mask);
+ cfgh = (new_val & wr_mask) | (cfgh & ~wr_mask);
+ env->mcyclecfg = deposit64(env->mcyclecfg, 32, 32, cfgh);
} else {
- *val = env->mcyclecfgh;
+ *val = cfgh;
}
break;
case 2: /* INSTRETCFGH */
--
2.51.0