The current PMU events are defined by the SBI PMU
specification. As there is no standard event encoding
scheme, the virt machine chooses to use the SBI PMU encoding.
A platform may choose to implement a completely different
event encoding scheme.

Rename the events to reflect this reality.

No functional changes introduced.
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/cpu.h | 26 +++++++++++++++-----
target/riscv/cpu_helper.c | 8 +++---
target/riscv/pmu.c | 62 ++++++++++++++++++-----------------------------
target/riscv/pmu.h | 2 +-
4 files changed, 48 insertions(+), 50 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 97e408b91219..2ac391a7cf74 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -820,14 +820,28 @@ enum {
/*
* The event id are encoded based on the encoding specified in the
* SBI specification v0.3
+ *
+ * The event encoding is specified in the SBI specification
+ * Event idx is a 20-bit wide number encoded as follows:
+ * event_idx[19:16] = type
+ * event_idx[15:0] = code
+ * The code field for cache events is encoded as follows:
+ * event_idx.code[15:3] = cache_id
+ * event_idx.code[2:1] = op_id
+ * event_idx.code[0:0] = result_id
*/
-enum riscv_pmu_event_idx {
- RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
- RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
- RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
- RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
- RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
+enum virt_pmu_event_idx {
+ /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
+ VIRT_PMU_EVENT_HW_CPU_CYCLES = 0x01,
+ /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
+ VIRT_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
+ /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
+ VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
+ /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
+ VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
+ /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
+ VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
/* used by tcg/tcg-cpu.c*/
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 203c0a92ab75..0f1655a221bd 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1295,17 +1295,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
- enum riscv_pmu_event_idx pmu_event_type;
+ enum virt_pmu_event_idx pmu_event_type;
switch (access_type) {
case MMU_INST_FETCH:
- pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
+ pmu_event_type = VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
break;
case MMU_DATA_LOAD:
- pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
+ pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS;
break;
case MMU_DATA_STORE:
- pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
+ pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
break;
default:
return;
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 2531d4f1a9c1..c436b08d1043 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -38,40 +38,24 @@ void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
{
uint32_t fdt_event_ctr_map[15] = {};
- /*
- * The event encoding is specified in the SBI specification
- * Event idx is a 20bits wide number encoded as follows:
- * event_idx[19:16] = type
- * event_idx[15:0] = code
- * The code field in cache events are encoded as follows:
- * event_idx.code[15:3] = cache_id
- * event_idx.code[2:1] = op_id
- * event_idx.code[0:0] = result_id
- */
-
- /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
- fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
- fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
+ fdt_event_ctr_map[0] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
+ fdt_event_ctr_map[1] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);
- /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
- fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
- fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
+ fdt_event_ctr_map[3] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
+ fdt_event_ctr_map[4] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);
- /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */
- fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
- fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
+ fdt_event_ctr_map[6] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
+ fdt_event_ctr_map[7] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
fdt_event_ctr_map[8] = cpu_to_be32(cmask);
- /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */
- fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
- fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
+ fdt_event_ctr_map[9] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
+ fdt_event_ctr_map[10] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
fdt_event_ctr_map[11] = cpu_to_be32(cmask);
- /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */
- fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
- fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
+ fdt_event_ctr_map[12] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
+ fdt_event_ctr_map[13] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
fdt_event_ctr_map[14] = cpu_to_be32(cmask);
/* This a OpenSBI specific DT property documented in OpenSBI docs */
@@ -290,7 +274,7 @@ void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
riscv_pmu_icount_update_priv(env, newpriv, new_virt);
}
-int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
+int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum virt_pmu_event_idx event_idx)
{
uint32_t ctr_idx;
int ret;
@@ -329,7 +313,7 @@ bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
}
cpu = env_archcpu(env);
- if (!riscv_pmu_htable_lookup(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS,
+ if (!riscv_pmu_htable_lookup(cpu, VIRT_PMU_EVENT_HW_INSTRUCTIONS,
&ctr_idx)) {
return false;
}
@@ -348,7 +332,7 @@ bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
}
cpu = env_archcpu(env);
- if (!riscv_pmu_htable_lookup(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES,
+ if (!riscv_pmu_htable_lookup(cpu, VIRT_PMU_EVENT_HW_CPU_CYCLES,
&ctr_idx)) {
return false;
}
@@ -406,11 +390,11 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
}
switch (event_idx) {
- case RISCV_PMU_EVENT_HW_CPU_CYCLES:
- case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
- case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
- case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
- case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
+ case VIRT_PMU_EVENT_HW_CPU_CYCLES:
+ case VIRT_PMU_EVENT_HW_INSTRUCTIONS:
+ case VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS:
+ case VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
+ case VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
break;
default:
/* We don't support any raw events right now */
@@ -464,7 +448,7 @@ static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
}
static void pmu_timer_trigger_irq(RISCVCPU *cpu,
- enum riscv_pmu_event_idx evt_idx)
+ enum virt_pmu_event_idx evt_idx)
{
uint32_t ctr_idx;
CPURISCVState *env = &cpu->env;
@@ -473,8 +457,8 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
uint64_t curr_ctr_val, curr_ctrh_val;
uint64_t ctr_val;
- if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
- evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
+ if (evt_idx != VIRT_PMU_EVENT_HW_CPU_CYCLES &&
+ evt_idx != VIRT_PMU_EVENT_HW_INSTRUCTIONS) {
return;
}
@@ -533,8 +517,8 @@ void riscv_pmu_timer_cb(void *priv)
RISCVCPU *cpu = priv;
/* Timer event was triggered only for these events */
- pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
- pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
+ pmu_timer_trigger_irq(cpu, VIRT_PMU_EVENT_HW_CPU_CYCLES);
+ pmu_timer_trigger_irq(cpu, VIRT_PMU_EVENT_HW_INSTRUCTIONS);
}
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 3853d0e2629e..75a22d596b69 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -30,7 +30,7 @@ void riscv_pmu_timer_cb(void *priv);
void riscv_pmu_init(RISCVCPU *cpu, Error **errp);
int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
-int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
+int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum virt_pmu_event_idx event_idx);
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
--
2.34.1
On 10.10.2024 02:09, Atish Patra wrote:
> The current PMU events are defined by the SBI PMU
> specification. As there is no standard event encoding
> scheme, the virt machine chooses to use the SBI PMU encoding.
> A platform may choose to implement a completely different
> event encoding scheme.
>
> Rename the events to reflect this reality.
>
> No functional changes introduced.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> ---
> target/riscv/cpu.h | 26 +++++++++++++++-----
> target/riscv/cpu_helper.c | 8 +++---
> target/riscv/pmu.c | 62 ++++++++++++++++++-----------------------------
> target/riscv/pmu.h | 2 +-
> 4 files changed, 48 insertions(+), 50 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 97e408b91219..2ac391a7cf74 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -820,14 +820,28 @@ enum {
> /*
> * The event id are encoded based on the encoding specified in the
> * SBI specification v0.3
> + *
> + * The event encoding is specified in the SBI specification
> + * Event idx is a 20-bit wide number encoded as follows:
> + * event_idx[19:16] = type
> + * event_idx[15:0] = code
> + * The code field for cache events is encoded as follows:
> + * event_idx.code[15:3] = cache_id
> + * event_idx.code[2:1] = op_id
> + * event_idx.code[0:0] = result_id
> */
>
> -enum riscv_pmu_event_idx {
> - RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
> - RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
> - RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
> - RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
> - RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
> +enum virt_pmu_event_idx {
> + /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
> + VIRT_PMU_EVENT_HW_CPU_CYCLES = 0x01,
> + /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
> + VIRT_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
> + /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
> + VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
> + /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
> + VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
> + /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
> + VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
Pretty sure this is not a good idea to rename them, since the generic
events do not even include the TLB_* events as far as I know. It's actually
better to just leave the generic naming as is and let the machine handle
machine-specific events separately.
> };
>
> /* used by tcg/tcg-cpu.c*/
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 203c0a92ab75..0f1655a221bd 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -1295,17 +1295,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
>
> static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
> {
> - enum riscv_pmu_event_idx pmu_event_type;
> + enum virt_pmu_event_idx pmu_event_type;
>
> switch (access_type) {
> case MMU_INST_FETCH:
> - pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
> + pmu_event_type = VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
> break;
> case MMU_DATA_LOAD:
> - pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
> + pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS;
> break;
> case MMU_DATA_STORE:
> - pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
> + pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
> break;
> default:
> return;
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 2531d4f1a9c1..c436b08d1043 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -38,40 +38,24 @@ void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
> {
> uint32_t fdt_event_ctr_map[15] = {};
>
> - /*
> - * The event encoding is specified in the SBI specification
> - * Event idx is a 20bits wide number encoded as follows:
> - * event_idx[19:16] = type
> - * event_idx[15:0] = code
> - * The code field in cache events are encoded as follows:
> - * event_idx.code[15:3] = cache_id
> - * event_idx.code[2:1] = op_id
> - * event_idx.code[0:0] = result_id
> - */
> -
> - /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
> - fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
> - fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
> + fdt_event_ctr_map[0] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
> + fdt_event_ctr_map[1] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
> fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);
>
> - /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
> - fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
> - fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
> + fdt_event_ctr_map[3] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
> + fdt_event_ctr_map[4] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
> fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);
>
> - /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */
> - fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
> - fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
> + fdt_event_ctr_map[6] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
> + fdt_event_ctr_map[7] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
> fdt_event_ctr_map[8] = cpu_to_be32(cmask);
>
> - /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */
> - fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
> - fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
> + fdt_event_ctr_map[9] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
> + fdt_event_ctr_map[10] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
> fdt_event_ctr_map[11] = cpu_to_be32(cmask);
>
> - /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */
> - fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
> - fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
> + fdt_event_ctr_map[12] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
> + fdt_event_ctr_map[13] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
> fdt_event_ctr_map[14] = cpu_to_be32(cmask);
OK, I guess it's time to do something generic with this function, because
if the number of supported machines goes up it's going to be a problem.
On Thu, Oct 10, 2024 at 5:10 AM Alexei Filippov
<alexei.filippov@yadro.com> wrote:
>
>
>
> On 10.10.2024 02:09, Atish Patra wrote:
> > The current PMU events are defined by the SBI PMU
> > specification. As there is no standard event encoding
> > scheme, the virt machine chooses to use the SBI PMU encoding.
> > A platform may choose to implement a completely different
> > event encoding scheme.
> >
> > Rename the events to reflect this reality.
> >
> > No functional changes introduced.
> >
> > Signed-off-by: Atish Patra <atishp@rivosinc.com>
> > ---
> > target/riscv/cpu.h | 26 +++++++++++++++-----
> > target/riscv/cpu_helper.c | 8 +++---
> > target/riscv/pmu.c | 62 ++++++++++++++++++-----------------------------
> > target/riscv/pmu.h | 2 +-
> > 4 files changed, 48 insertions(+), 50 deletions(-)
> >
> > diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> > index 97e408b91219..2ac391a7cf74 100644
> > --- a/target/riscv/cpu.h
> > +++ b/target/riscv/cpu.h
> > @@ -820,14 +820,28 @@ enum {
> > /*
> > * The event id are encoded based on the encoding specified in the
> > * SBI specification v0.3
> > + *
> > + * The event encoding is specified in the SBI specification
> > + * Event idx is a 20-bit wide number encoded as follows:
> > + * event_idx[19:16] = type
> > + * event_idx[15:0] = code
> > + * The code field for cache events is encoded as follows:
> > + * event_idx.code[15:3] = cache_id
> > + * event_idx.code[2:1] = op_id
> > + * event_idx.code[0:0] = result_id
> > */
> >
> > -enum riscv_pmu_event_idx {
> > - RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
> > - RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
> > - RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
> > - RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
> > - RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
> > +enum virt_pmu_event_idx {
> > + /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
> > + VIRT_PMU_EVENT_HW_CPU_CYCLES = 0x01,
> > + /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
> > + VIRT_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
> > + /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
> > + VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
> > + /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
> > + VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
> > + /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
> > + VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
> Pretty sure this is not a good idea to rename them, since the generic
> events do not even include the TLB_* events as far as I know. It's actually
> better to just leave the generic naming as is and let the machine handle
> machine-specific events separately.
These event names are defined in the SBI specification, which the virt
machine implements:
https://github.com/riscv-non-isa/riscv-sbi-doc/blob/master/src/ext-pmu.adoc#event-hardware-cache-events-type-1
These are not RISC-V standard events. As I mentioned in the cover
letter, there are no standard RISC-V event names.
Adding the RISCV_PMU prefix is confusing, as there is a performance
event TG that is trying to define standard events.
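For reference, the event_idx encoding from the patch comment can be
decomposed like this (an illustrative sketch only; these macro names are
hypothetical and not taken from the QEMU tree):

/* Illustrative decoding of an SBI PMU event_idx; names are hypothetical. */
#define SBI_PMU_EVENT_TYPE(idx)          (((idx) >> 16) & 0xf)    /* event_idx[19:16] */
#define SBI_PMU_EVENT_CODE(idx)          ((idx) & 0xffff)         /* event_idx[15:0]  */
/* For hardware cache events (type 0x01) the code subdivides further: */
#define SBI_PMU_EVENT_CACHE_ID(code)     (((code) >> 3) & 0x1fff) /* code[15:3] */
#define SBI_PMU_EVENT_CACHE_OP(code)     (((code) >> 1) & 0x3)    /* code[2:1]  */
#define SBI_PMU_EVENT_CACHE_RESULT(code) ((code) & 0x1)           /* code[0]    */

/*
 * Example: VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B decodes to
 * type = 0x1 (cache event), cache_id = 0x3 (DTLB), op_id = 0x1 (WRITE)
 * and result_id = 0x1 (MISS); note that result_id for a MISS is 0x1,
 * not 0x0.
 */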
> > };
> >
> > /* used by tcg/tcg-cpu.c*/
> > diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> > index 203c0a92ab75..0f1655a221bd 100644
> > --- a/target/riscv/cpu_helper.c
> > +++ b/target/riscv/cpu_helper.c
> > @@ -1295,17 +1295,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
> >
> > static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
> > {
> > - enum riscv_pmu_event_idx pmu_event_type;
> > + enum virt_pmu_event_idx pmu_event_type;
> >
> > switch (access_type) {
> > case MMU_INST_FETCH:
> > - pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
> > + pmu_event_type = VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
> > break;
> > case MMU_DATA_LOAD:
> > - pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
> > + pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS;
> > break;
> > case MMU_DATA_STORE:
> > - pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
> > + pmu_event_type = VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
> > break;
> > default:
> > return;
> > diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> > index 2531d4f1a9c1..c436b08d1043 100644
> > --- a/target/riscv/pmu.c
> > +++ b/target/riscv/pmu.c
> > @@ -38,40 +38,24 @@ void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
> > {
> > uint32_t fdt_event_ctr_map[15] = {};
> >
> > - /*
> > - * The event encoding is specified in the SBI specification
> > - * Event idx is a 20bits wide number encoded as follows:
> > - * event_idx[19:16] = type
> > - * event_idx[15:0] = code
> > - * The code field in cache events are encoded as follows:
> > - * event_idx.code[15:3] = cache_id
> > - * event_idx.code[2:1] = op_id
> > - * event_idx.code[0:0] = result_id
> > - */
> > -
> > - /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
> > - fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
> > - fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
> > + fdt_event_ctr_map[0] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
> > + fdt_event_ctr_map[1] = cpu_to_be32(VIRT_PMU_EVENT_HW_CPU_CYCLES);
> > fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);
> >
> > - /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
> > - fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
> > - fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
> > + fdt_event_ctr_map[3] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
> > + fdt_event_ctr_map[4] = cpu_to_be32(VIRT_PMU_EVENT_HW_INSTRUCTIONS);
> > fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);
> >
> > - /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */
> > - fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
> > - fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
> > + fdt_event_ctr_map[6] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
> > + fdt_event_ctr_map[7] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS);
> > fdt_event_ctr_map[8] = cpu_to_be32(cmask);
> >
> > - /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */
> > - fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
> > - fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
> > + fdt_event_ctr_map[9] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
> > + fdt_event_ctr_map[10] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS);
> > fdt_event_ctr_map[11] = cpu_to_be32(cmask);
> >
> > - /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */
> > - fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
> > - fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
> > + fdt_event_ctr_map[12] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
> > + fdt_event_ctr_map[13] = cpu_to_be32(VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS);
> > fdt_event_ctr_map[14] = cpu_to_be32(cmask);
> OK, I guess it's time to do something generic with this function, because
> if the number of supported machines goes up it's going to be a problem.
> >
We can define a generic helper function in pmu.c. Keep in mind
this is only required for machines without the counter delegation
extensions. In a few years, every new platform will probably implement
counter delegation, which removes the need for the SBI PMU DT node.
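Something along these lines, perhaps (an untested sketch; the struct and
helper names are made up, assuming the existing
riscv,event-to-mhpmcounters property is kept):

/* Untested sketch; struct and helper names are hypothetical. */
typedef struct PMUEventCtrMap {
    uint32_t event_low;   /* first event_idx of the mapped range */
    uint32_t event_high;  /* last event_idx of the mapped range  */
    uint32_t ctr_map;     /* counters that can monitor the range */
} PMUEventCtrMap;

static void pmu_generate_fdt_node(void *fdt, const PMUEventCtrMap *map,
                                  int num_events, char *pmu_name)
{
    g_autofree uint32_t *cells = g_new0(uint32_t, num_events * 3);

    for (int i = 0; i < num_events; i++) {
        cells[i * 3]     = cpu_to_be32(map[i].event_low);
        cells[i * 3 + 1] = cpu_to_be32(map[i].event_high);
        cells[i * 3 + 2] = cpu_to_be32(map[i].ctr_map);
    }

    qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
                     cells, num_events * 3 * sizeof(uint32_t));
}

Each machine could then pass its own event table instead of open-coding
the fifteen cells.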