The virt PMU related implementation should belong to the virt
machine file rather than the common pmu.c, which can be used
for other implementations.
Make pmu.c generic by moving all the virt PMU event related
structures to their appropriate place.
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
hw/riscv/virt.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
target/riscv/pmu.c | 73 ++++++++++++++++++++++++++++++------------------
2 files changed, 128 insertions(+), 26 deletions(-)
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index ee3129f3b314..ffda6d65d673 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -56,6 +56,61 @@
#include "qapi/qapi-visit-common.h"
#include "hw/virtio/virtio-iommu.h"
+static PMUEventInfo pmu_events_arr[] = {
+ {
+ .event_id = VIRT_PMU_EVENT_HW_CPU_CYCLES,
+ .counter_mask = 0x01,
+ },
+ {
+ .event_id = VIRT_PMU_EVENT_HW_INSTRUCTIONS,
+ .counter_mask = 0x04,
+ },
+ {
+ .event_id = VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS,
+ .counter_mask = 0,
+ },
+ {
+ .event_id = VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS,
+ .counter_mask = 0,
+ },
+ {
+ .event_id = VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS,
+ .counter_mask = 0,
+ },
+};
+
+static inline uint64_t virt_pmu_get_cycle_event_id(RISCVCPU *cpu)
+{
+ return VIRT_PMU_EVENT_HW_CPU_CYCLES;
+}
+
+static inline uint64_t virt_pmu_get_instret_event_id(RISCVCPU *cpu)
+{
+ return VIRT_PMU_EVENT_HW_INSTRUCTIONS;
+}
+
+static uint64_t virt_pmu_get_tlb_event_id(RISCVCPU *cpu,
+ MMUAccessType access_type)
+{
+ uint64_t tlb_event_type = ULONG_MAX;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ tlb_event_type = VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
+ break;
+ case MMU_DATA_LOAD:
+ tlb_event_type = VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS;
+ break;
+ case MMU_DATA_STORE:
+ tlb_event_type = VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
+ break;
+ default:
+ break;
+ }
+
+ return tlb_event_type;
+}
+
/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
static bool virt_use_kvm_aia(RISCVVirtState *s)
{
@@ -710,6 +765,29 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
aplic_phandles[socket] = aplic_s_phandle;
}
+static void virt_pmu_events_init(RISCVVirtState *s)
+{
+ int cpu, socket, i;
+ MachineState *ms = MACHINE(s);
+ int num_sockets = riscv_socket_count(ms);
+ RISCVCPU *hart;
+
+ for (socket = 0 ; socket < num_sockets; socket++) {
+ for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
+ hart = &s->soc[socket].harts[cpu];
+ hart->env.num_pmu_events = ARRAY_SIZE(pmu_events_arr);
+ /* All hpmcounters can monitor all supported events */
+ for (i = 0; i < ARRAY_SIZE(pmu_events_arr); i++) {
+ pmu_events_arr[i].counter_mask |= hart->cfg.pmu_mask;
+ }
+ hart->env.pmu_events = pmu_events_arr;
+ hart->env.pmu_efuncs.get_cycle_id = virt_pmu_get_cycle_event_id;
+ hart->env.pmu_efuncs.get_intstret_id = virt_pmu_get_instret_event_id;
+ hart->env.pmu_efuncs.get_tlb_access_id = virt_pmu_get_tlb_event_id;
+ }
+ }
+}
+
+
static void create_fdt_pmu(RISCVVirtState *s)
{
g_autofree char *pmu_name = g_strdup_printf("/pmu");
@@ -1614,6 +1692,9 @@ static void virt_machine_init(MachineState *machine)
}
virt_flash_map(s, system_memory);
+ /* Setup the PMU Event details. This must happen before fdt setup */
+ virt_pmu_events_init(s);
+
/* load/create device tree */
if (machine->dtb) {
machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index c436b08d1043..3235388c66e4 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -304,7 +304,8 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum virt_pmu_event_idx event_idx)
bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
uint32_t target_ctr)
{
- RISCVCPU *cpu;
+ uint64_t event_idx = ULONG_MAX;
+ RISCVCPU *cpu = env_archcpu(env);
uint32_t ctr_idx;
/* Fixed instret counter */
@@ -312,9 +313,15 @@ bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
return true;
}
- cpu = env_archcpu(env);
- if (!riscv_pmu_htable_lookup(cpu, VIRT_PMU_EVENT_HW_INSTRUCTIONS,
- &ctr_idx)) {
+ if (env->pmu_efuncs.get_intstret_id) {
+ event_idx = env->pmu_efuncs.get_intstret_id(cpu);
+ }
+
+ if (event_idx == ULONG_MAX) {
+ return false;
+ }
+
+ if (!riscv_pmu_htable_lookup(cpu, event_idx, &ctr_idx)) {
return false;
}
@@ -323,7 +330,8 @@ bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
- RISCVCPU *cpu;
+ uint64_t event_idx = ULONG_MAX;
+ RISCVCPU *cpu = env_archcpu(env);
uint32_t ctr_idx;
/* Fixed mcycle counter */
@@ -331,9 +339,15 @@ bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
return true;
}
- cpu = env_archcpu(env);
- if (!riscv_pmu_htable_lookup(cpu, VIRT_PMU_EVENT_HW_CPU_CYCLES,
- &ctr_idx)) {
+ if (env->pmu_efuncs.get_cycle_id) {
+ event_idx = env->pmu_efuncs.get_cycle_id(cpu);
+ }
+
+ if (event_idx == ULONG_MAX) {
+ return false;
+ }
+
+ if (!riscv_pmu_htable_lookup(cpu, event_idx, &ctr_idx)) {
return false;
}
@@ -366,6 +380,8 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
RISCVCPU *cpu = env_archcpu(env);
uint32_t mapped_ctr_idx;
gint64 *eid_ptr;
+ bool valid_event = false;
+ int i;
if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
return -1;
@@ -389,15 +405,14 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
return 0;
}
- switch (event_idx) {
- case VIRT_PMU_EVENT_HW_CPU_CYCLES:
- case VIRT_PMU_EVENT_HW_INSTRUCTIONS:
- case VIRT_PMU_EVENT_CACHE_DTLB_READ_MISS:
- case VIRT_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
- case VIRT_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
- break;
- default:
- /* We don't support any raw events right now */
+ for (i = 0; i < env->num_pmu_events; i++) {
+ if (event_idx == env->pmu_events[i].event_id) {
+ valid_event = true;
+ break;
+ }
+ }
+
+ if (!valid_event) {
return -1;
}
eid_ptr = g_new(gint64, 1);
@@ -447,8 +462,7 @@ static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
return false;
}
-static void pmu_timer_trigger_irq(RISCVCPU *cpu,
- enum virt_pmu_event_idx evt_idx)
+static void pmu_timer_trigger_irq(RISCVCPU *cpu, uint64_t evt_idx)
{
uint32_t ctr_idx;
CPURISCVState *env = &cpu->env;
@@ -457,11 +471,6 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
uint64_t curr_ctr_val, curr_ctrh_val;
uint64_t ctr_val;
- if (evt_idx != VIRT_PMU_EVENT_HW_CPU_CYCLES &&
- evt_idx != VIRT_PMU_EVENT_HW_INSTRUCTIONS) {
- return;
- }
-
if (!riscv_pmu_htable_lookup(cpu, evt_idx, &ctr_idx)) {
return;
}
@@ -515,10 +524,22 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
void riscv_pmu_timer_cb(void *priv)
{
RISCVCPU *cpu = priv;
+ uint64_t event_idx;
+ CPURISCVState *env = &cpu->env;
/* Timer event was triggered only for these events */
- pmu_timer_trigger_irq(cpu, VIRT_PMU_EVENT_HW_CPU_CYCLES);
- pmu_timer_trigger_irq(cpu, VIRT_PMU_EVENT_HW_INSTRUCTIONS);
+ if (env->pmu_efuncs.get_cycle_id) {
+ event_idx = env->pmu_efuncs.get_cycle_id(cpu);
+ if (event_idx != ULONG_MAX) {
+ pmu_timer_trigger_irq(cpu, event_idx);
+ }
+ }
+ if (env->pmu_efuncs.get_intstret_id) {
+ event_idx = env->pmu_efuncs.get_intstret_id(cpu);
+ if (event_idx != ULONG_MAX) {
+ pmu_timer_trigger_irq(cpu, event_idx);
+ }
+ }
}
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
--
2.34.1
© 2016 - 2024 Red Hat, Inc.