From: Kaiwen Xue <kaiwenx@rivosinc.com>
QEMU only calculates dummy cycles and instructions, so there is no
actual means to stop the icount in QEMU. Hence, this patch merely adds
the functionality of accessing the cfg registers, and has no actual
effect on the counting of the cycle and instret counters.
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
---
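A note on the write behavior: on RV64 (and in the *cfgh handlers on
RV32), an xINH filter bit is retained only if the corresponding
privilege mode exists on the hart; filter bits for unsupported modes
are silently masked off on write. Below is a rough bare-metal RV64
sketch of how M-mode software might exercise the new CSR. The 0x321
CSR number and the bit positions follow the Smcntrpmf spec; the helper
names are illustrative only, not taken from QEMU or this patch.

#include <stdint.h>

#define CSR_MCYCLECFG   0x321           /* Smcntrpmf cycle counter config */
#define MCYCLECFG_MINH  (1ULL << 62)    /* inhibit counting in M-mode */
#define MCYCLECFG_UINH  (1ULL << 60)    /* inhibit counting in U-mode */

static inline void csr_write_mcyclecfg(uint64_t val)
{
    asm volatile("csrw 0x321, %0" : : "r"(val));
}

static inline uint64_t csr_read_mcyclecfg(void)
{
    uint64_t val;

    asm volatile("csrr %0, 0x321" : "=r"(val));
    return val;
}

void stop_cycle_count_in_m_and_u_mode(void)
{
    csr_write_mcyclecfg(MCYCLECFG_MINH | MCYCLECFG_UINH);
    /*
     * The write handler masks out xINH bits for privilege modes the
     * hart does not implement, so reading back shows which filters
     * were actually retained (e.g. UINH is dropped without RVU).
     */
    (void)csr_read_mcyclecfg();
}

Since QEMU only keeps dummy cycle/instret values, this write changes
no counts here; on real hardware it would stop the cycle counter while
executing in M-mode or U-mode.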
target/riscv/cpu_bits.h | 12 +++++
target/riscv/csr.c | 138 +++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 149 insertions(+), 1 deletion(-)
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 5faa817453bb..32b068f18aa5 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -926,6 +926,18 @@ typedef enum RISCVException {
#define MHPMEVENT_BIT_VUINH BIT_ULL(58)
#define MHPMEVENTH_BIT_VUINH BIT(26)
+#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \
+ MHPMEVENT_BIT_SINH | \
+ MHPMEVENT_BIT_UINH | \
+ MHPMEVENT_BIT_VSINH | \
+ MHPMEVENT_BIT_VUINH)
+
+#define MHPMEVENTH_FILTER_MASK (MHPMEVENTH_BIT_MINH | \
+ MHPMEVENTH_BIT_SINH | \
+ MHPMEVENTH_BIT_UINH | \
+ MHPMEVENTH_BIT_VSINH | \
+ MHPMEVENTH_BIT_VUINH)
+
#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
#define MHPMEVENT_IDX_MASK 0xFFFFF
#define MHPMEVENT_SSCOF_RESVD 16
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 3ad851707e5c..b814d176cbb8 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -30,7 +30,6 @@
#include "qemu/guest-random.h"
#include "qapi/error.h"
-
/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
@@ -236,6 +235,24 @@ static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
return sscofpmf(env, csrno);
}
+static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smcntrpmf(env, csrno);
+}
+
static RISCVException any(CPURISCVState *env, int csrno)
{
return RISCV_EXCP_NONE;
@@ -830,6 +847,111 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
#else /* CONFIG_USER_ONLY */
+static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcyclecfg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t inh_avail_mask;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ env->mcyclecfg = val;
+ } else {
+ /* Set xINH fields if priv mode supported */
+ inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
+ inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
+ inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
+ env->mcyclecfg = val & inh_avail_mask;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcyclecfgh;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
+ MCYCLECFGH_BIT_MINH);
+
+ /* Set xINH fields if priv mode supported */
+ inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
+ inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;
+
+ env->mcyclecfgh = val & inh_avail_mask;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->minstretcfg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t inh_avail_mask;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ env->minstretcfg = val;
+ } else {
+ inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
+ inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
+ inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
+ env->minstretcfg = val & inh_avail_mask;
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->minstretcfgh;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
+ MINSTRETCFGH_BIT_MINH);
+
+ inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
+ inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
+ inh_avail_mask |= (riscv_has_ext(env, RVH) &&
+ riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
+
+ env->minstretcfgh = val & inh_avail_mask;
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -5051,6 +5173,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_mcountinhibit,
.min_priv_ver = PRIV_VERSION_1_11_0 },
+ [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
+ write_mcyclecfg,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
+ write_minstretcfg,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
write_mhpmevent },
[CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
@@ -5110,6 +5239,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
write_mhpmevent },
+ [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
+ write_mcyclecfgh,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
+ write_minstretcfgh,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
[CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
--
2.34.1
On Fri, Jul 12, 2024 at 8:34 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Kaiwen Xue <kaiwenx@rivosinc.com>
>
> QEMU only calculates dummy cycles and instructions, so there is no
> actual means to stop the icount in QEMU. Hence, this patch merely adds
> the functionality of accessing the cfg registers, and has no actual
> effect on the counting of the cycle and instret counters.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>

[...]

Acked-by: Alistair Francis <alistair.francis@wdc.com>

Alistair