[PATCH v2] target/riscv: Add support for machine-specific PMU events

Alexei Filippov posted 1 patch 5 months ago
git fetch https://github.com/patchew-project/qemu tags/patchew/20240625144643.34733-1-alexei.filippov@syntacore.com
Maintainers: Palmer Dabbelt <palmer@dabbelt.com>, Alistair Francis <alistair.francis@wdc.com>, Bin Meng <bmeng.cn@gmail.com>, Weiwei Li <liwei1518@gmail.com>, Daniel Henrique Barboza <dbarboza@ventanamicro.com>, Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
[PATCH v2] target/riscv: Add support for machine-specific PMU events
Posted by Alexei Filippov 5 months ago
Add callbacks for machine-specific PMU events.
Simplify the monitor functions by adding a new hash table that maps a
counter number to its event index.
Add read/write callbacks to simplify support for events that are
expected to behave differently.
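
No machine wires these hooks up in this patch (the machine side is
planned as a follow-up, see the discussion below). As an illustrative
sketch only, with placeholder vendor_pmu_* names and a made-up event
id, a vendor backend could look roughly like:

#include "qemu/osdep.h"
#include "cpu.h"

#define VENDOR_PMU_EVENT_L2_MISS 0x1001 /* hypothetical raw event id */

static bool vendor_pmu_event_supported(uint32_t event_idx)
{
    return event_idx == VENDOR_PMU_EVENT_L2_MISS;
}

static target_ulong vendor_pmu_ctr_read(PMUCTRState *counter,
                                        uint32_t event_idx, bool high_half)
{
    /* A real backend would sample its own device model state here. */
    return high_half ? counter->mhpmcounterh_val : counter->mhpmcounter_val;
}

static void vendor_pmu_ctr_write(PMUCTRState *counter, uint32_t event_idx,
                                 target_ulong val, bool high_half)
{
    if (high_half) {
        counter->mhpmcounterh_val = val;
    } else {
        counter->mhpmcounter_val = val;
    }
}

A machine model would then point env->pmu_ctr_write, env->pmu_ctr_read
and env->pmu_vendor_support at these functions, in the same place it
already installs rdtime_fn.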

Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
---
Changes since v1:
		- rebased to latest master
 target/riscv/cpu.h |   9 +++
 target/riscv/csr.c |  43 +++++++++-----
 target/riscv/pmu.c | 139 ++++++++++++++++++++++-----------------------
 target/riscv/pmu.h |  11 ++--
 4 files changed, 115 insertions(+), 87 deletions(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 6fe0d712b4..fbf82b050b 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -374,6 +374,13 @@ struct CPUArchState {
     uint64_t (*rdtime_fn)(void *);
     void *rdtime_fn_arg;
 
+    /*machine specific pmu callback */
+    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
+                          target_ulong val, bool high_half);
+    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t event_idx,
+                                 bool high_half);
+    bool (*pmu_vendor_support)(uint32_t event_idx);
+
     /* machine specific AIA ireg read-modify-write callback */
 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
     ((((__xlen) & 0xff) << 24) | \
@@ -455,6 +462,8 @@ struct ArchCPU {
     uint32_t pmu_avail_ctrs;
     /* Mapping of events to counters */
     GHashTable *pmu_event_ctr_map;
+    /* Mapping of counters to events */
+    GHashTable *pmu_ctr_event_map;
     const GPtrArray *decoders;
 };
 
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 58ef7079dc..b541852c84 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -875,20 +875,25 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
     int ctr_idx = csrno - CSR_MCYCLE;
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
     uint64_t mhpmctr_val = val;
+    int event_idx;
 
     counter->mhpmcounter_val = val;
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
-        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
-        counter->mhpmcounter_prev = get_ticks(false);
-        if (ctr_idx > 2) {
+    event_idx = riscv_pmu_get_event_by_ctr(env, ctr_idx);
+
+    if (event_idx != RISCV_PMU_EVENT_NOT_PRESENTED) {
+        if (RISCV_PMU_CTR_IS_HPM(ctr_idx) && env->pmu_ctr_write) {
+            env->pmu_ctr_write(counter, event_idx, val, false);
+        } else {
+            counter->mhpmcounter_prev = get_ticks(false);
+        }
+        if (RISCV_PMU_CTR_IS_HPM(ctr_idx)) {
             if (riscv_cpu_mxl(env) == MXL_RV32) {
                 mhpmctr_val = mhpmctr_val |
                               ((uint64_t)counter->mhpmcounterh_val << 32);
             }
             riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
         }
-     } else {
-        /* Other counters can keep incrementing from the given value */
+    } else {
         counter->mhpmcounter_prev = val;
     }
 
@@ -902,13 +907,19 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
     uint64_t mhpmctr_val = counter->mhpmcounter_val;
     uint64_t mhpmctrh_val = val;
+    int event_idx;
 
     counter->mhpmcounterh_val = val;
     mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
-        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
-        counter->mhpmcounterh_prev = get_ticks(true);
-        if (ctr_idx > 2) {
+    event_idx = riscv_pmu_get_event_by_ctr(env, ctr_idx);
+
+    if (event_idx != RISCV_PMU_EVENT_NOT_PRESENTED) {
+        if (RISCV_PMU_CTR_IS_HPM(ctr_idx) && env->pmu_ctr_write) {
+            env->pmu_ctr_write(counter, event_idx, val, true);
+        } else {
+            counter->mhpmcounterh_prev = get_ticks(true);
+        }
+        if (RISCV_PMU_CTR_IS_HPM(ctr_idx)) {
             riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
         }
     } else {
@@ -926,6 +937,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                          counter->mhpmcounter_prev;
     target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                         counter->mhpmcounter_val;
+    int event_idx;
 
     if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
         /*
@@ -946,9 +958,14 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
      * The kernel computes the perf delta by subtracting the current value from
      * the value it initialized previously (ctr_val).
      */
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
-        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
-        *val = get_ticks(upper_half) - ctr_prev + ctr_val;
+    event_idx = riscv_pmu_get_event_by_ctr(env, ctr_idx);
+    if (event_idx != RISCV_PMU_EVENT_NOT_PRESENTED) {
+        if (RISCV_PMU_CTR_IS_HPM(ctr_idx) && env->pmu_ctr_read) {
+            *val = env->pmu_ctr_read(counter, event_idx,
+                                     upper_half);
+        } else {
+            *val = get_ticks(upper_half) - ctr_prev + ctr_val;
+        }
     } else {
         *val = ctr_val;
     }
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 0e7d58b8a5..c3b6b20337 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -88,7 +88,7 @@ static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
     }
 }
 
-static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
+bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
 {
     CPURISCVState *env = &cpu->env;
 
@@ -207,59 +207,28 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
     return ret;
 }
 
-bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
-                                        uint32_t target_ctr)
+int riscv_pmu_get_event_by_ctr(CPURISCVState *env,
+                               uint32_t target_ctr)
 {
     RISCVCPU *cpu;
     uint32_t event_idx;
-    uint32_t ctr_idx;
 
-    /* Fixed instret counter */
-    if (target_ctr == 2) {
-        return true;
+    if (target_ctr < 3) {
+        return target_ctr;
     }
 
     cpu = env_archcpu(env);
-    if (!cpu->pmu_event_ctr_map) {
-        return false;
+    if (!cpu->pmu_ctr_event_map || !cpu->pmu_event_ctr_map) {
+        return RISCV_PMU_EVENT_NOT_PRESENTED;
     }
 
-    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
-    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
-                               GUINT_TO_POINTER(event_idx)));
-    if (!ctr_idx) {
-        return false;
+    event_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_ctr_event_map,
+                                                     GUINT_TO_POINTER(target_ctr)));
+    if (!event_idx) {
+        return RISCV_PMU_EVENT_NOT_PRESENTED;
     }
 
-    return target_ctr == ctr_idx ? true : false;
-}
-
-bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
-{
-    RISCVCPU *cpu;
-    uint32_t event_idx;
-    uint32_t ctr_idx;
-
-    /* Fixed mcycle counter */
-    if (target_ctr == 0) {
-        return true;
-    }
-
-    cpu = env_archcpu(env);
-    if (!cpu->pmu_event_ctr_map) {
-        return false;
-    }
-
-    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
-    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
-                               GUINT_TO_POINTER(event_idx)));
-
-    /* Counter zero is not used for event_ctr_map */
-    if (!ctr_idx) {
-        return false;
-    }
-
-    return (target_ctr == ctr_idx) ? true : false;
+    return event_idx;
 }
 
 static gboolean pmu_remove_event_map(gpointer key, gpointer value,
@@ -268,6 +237,12 @@ static gboolean pmu_remove_event_map(gpointer key, gpointer value,
     return (GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata)) ? true : false;
 }
 
+static gboolean pmu_remove_ctr_map(gpointer key, gpointer value,
+                                   gpointer udata)
+{
+    return (GPOINTER_TO_UINT(key) == GPOINTER_TO_UINT(udata)) ? true : false;
+}
+
 static int64_t pmu_icount_ticks_to_ns(int64_t value)
 {
     int64_t ret = 0;
@@ -286,8 +261,11 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
 {
     uint32_t event_idx;
     RISCVCPU *cpu = env_archcpu(env);
+    bool machine_specific = false;
 
-    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
+    if (!riscv_pmu_counter_valid(cpu, ctr_idx) ||
+        !cpu->pmu_event_ctr_map ||
+        !cpu->pmu_ctr_event_map) {
         return -1;
     }
 
@@ -299,6 +277,9 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
         g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                     pmu_remove_event_map,
                                     GUINT_TO_POINTER(ctr_idx));
+        g_hash_table_foreach_remove(cpu->pmu_ctr_event_map,
+                                    pmu_remove_ctr_map,
+                                    GUINT_TO_POINTER(ctr_idx));
         return 0;
     }
 
@@ -308,40 +289,39 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
         return 0;
     }
 
-    switch (event_idx) {
-    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
-    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
-    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
-    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
-    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
-        break;
-    default:
-        /* We don't support any raw events right now */
-        return -1;
+    if (RISCV_PMU_CTR_IS_HPM(ctr_idx) && env->pmu_vendor_support) {
+        machine_specific = env->pmu_vendor_support(event_idx);
+    }
+
+    if (!machine_specific) {
+        switch (event_idx) {
+        case RISCV_PMU_EVENT_HW_CPU_CYCLES:
+        case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
+        case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
+        case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
+        case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
+            break;
+        default:
+            return -1;
+        }
     }
     g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                         GUINT_TO_POINTER(ctr_idx));
+    g_hash_table_insert(cpu->pmu_ctr_event_map, GUINT_TO_POINTER(ctr_idx),
+                        GUINT_TO_POINTER(event_idx));
 
     return 0;
 }
 
 static void pmu_timer_trigger_irq(RISCVCPU *cpu,
-                                  enum riscv_pmu_event_idx evt_idx)
+                                  uint32_t ctr_idx)
 {
-    uint32_t ctr_idx;
     CPURISCVState *env = &cpu->env;
     PMUCTRState *counter;
     target_ulong *mhpmevent_val;
     uint64_t of_bit_mask;
     int64_t irq_trigger_at;
 
-    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
-        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
-        return;
-    }
-
-    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
-                               GUINT_TO_POINTER(evt_idx)));
     if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
         return;
     }
@@ -349,7 +329,7 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
     if (riscv_cpu_mxl(env) == MXL_RV32) {
         mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
         of_bit_mask = MHPMEVENTH_BIT_OF;
-     } else {
+    } else {
         mhpmevent_val = &env->mhpmevent_val[ctr_idx];
         of_bit_mask = MHPMEVENT_BIT_OF;
     }
@@ -372,14 +352,25 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
     }
 }
 
+static void riscv_pmu_timer_trigger_irq(gpointer ctr, gpointer event_idx,
+                                            gpointer opaque)
+{
+    RISCVCPU *cpu = opaque;
+
+    pmu_timer_trigger_irq(cpu, GPOINTER_TO_UINT(ctr));
+}
+
 /* Timer callback for instret and cycle counter overflow */
 void riscv_pmu_timer_cb(void *priv)
 {
     RISCVCPU *cpu = priv;
 
-    /* Timer event was triggered only for these events */
-    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
-    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
+    if (!cpu->pmu_ctr_event_map || !cpu->pmu_event_ctr_map) {
+        return;
+    }
+    g_hash_table_foreach(cpu->pmu_ctr_event_map,
+                         riscv_pmu_timer_trigger_irq,
+                         cpu);
 }
 
 int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
@@ -388,6 +379,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
     int64_t overflow_ns, overflow_left = 0;
     RISCVCPU *cpu = env_archcpu(env);
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
+    uint32_t event_idx;
 
     if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
         return -1;
@@ -408,8 +400,9 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
         overflow_left = overflow_delta - INT64_MAX;
     }
 
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
-        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+    event_idx = riscv_pmu_get_event_by_ctr(env, ctr_idx);
+
+    if (event_idx != RISCV_PMU_EVENT_NOT_PRESENTED) {
         overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
         overflow_left = pmu_icount_ticks_to_ns(overflow_left) ;
     } else {
@@ -443,7 +436,13 @@ void riscv_pmu_init(RISCVCPU *cpu, Error **errp)
 
     cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
     if (!cpu->pmu_event_ctr_map) {
-        error_setg(errp, "Unable to allocate PMU event hash table");
+        error_setg(errp, "Unable to allocate first PMU event hash table");
+        return;
+    }
+
+    cpu->pmu_ctr_event_map = g_hash_table_new(g_direct_hash, g_direct_equal);
+    if (!cpu->pmu_ctr_event_map) {
+        error_setg(errp, "Unable to allocate second PMU event hash table");
         return;
     }
 
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 7c0ad661e0..b99a5f58d4 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -22,10 +22,12 @@
 #include "cpu.h"
 #include "qapi/error.h"
 
-bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
-                                        uint32_t target_ctr);
-bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env,
-                                  uint32_t target_ctr);
+#define RISCV_PMU_EVENT_NOT_PRESENTED -1
+
+#define RISCV_PMU_CTR_IS_HPM(x) (x > 2)
+
+int riscv_pmu_get_event_by_ctr(CPURISCVState *env,
+                               uint32_t target_ctr);
 void riscv_pmu_timer_cb(void *priv);
 void riscv_pmu_init(RISCVCPU *cpu, Error **errp);
 int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
@@ -34,5 +36,6 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
 void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
 int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
                           uint32_t ctr_idx);
+bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx);
 
 #endif /* RISCV_PMU_H */
-- 
2.34.1
Re: [PATCH v2] target/riscv: Add support for machine-specific PMU events
Posted by Richard Henderson 5 months ago
On 6/25/24 07:46, Alexei Filippov wrote:
> Was added call backs for machine specific pmu events.
> Simplify monitor functions by adding new hash table, which going to map
> counter number and event index.
> Was added read/write callbacks which going to simplify support for events,
> which expected to have different behavior.
> 
> Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
> ---
> Changes since v2:
> 		-rebased to latest master
>   target/riscv/cpu.h |   9 +++
>   target/riscv/csr.c |  43 +++++++++-----
>   target/riscv/pmu.c | 139 ++++++++++++++++++++++-----------------------
>   target/riscv/pmu.h |  11 ++--
>   4 files changed, 115 insertions(+), 87 deletions(-)
> 
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 6fe0d712b4..fbf82b050b 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -374,6 +374,13 @@ struct CPUArchState {
>       uint64_t (*rdtime_fn)(void *);
>       void *rdtime_fn_arg;
>   
> +    /*machine specific pmu callback */
> +    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
> +                          target_ulong val, bool high_half);
> +    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t event_idx,
> +                                 bool high_half);
> +    bool (*pmu_vendor_support)(uint32_t event_idx);

Do these really belong in CPUArchState, rather than RISCVCPUClass?

Surely there's more to this series, since these fields are never set...
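
For reference, the class-level alternative being suggested might look
roughly like this (purely an illustrative sketch, not code from this
patch or series):

struct RISCVCPUClass {
    CPUClass parent_class;
    /* ... existing members unchanged ... */

    /* vendor PMU hooks, shared by every instance of a CPU model */
    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
                          target_ulong val, bool high_half);
    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t event_idx,
                                 bool high_half);
    bool (*pmu_vendor_support)(uint32_t event_idx);
};

csr.c would then reach the hooks through the CPU's class (e.g. via
RISCV_CPU_GET_CLASS()) instead of reading them from env.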


r~
Re: [PATCH v2] target/riscv: Add support for machine-specific PMU events
Posted by Aleksei Filippov 4 months, 2 weeks ago

On 25.06.2024 21:18, Richard Henderson wrote:
> On 6/25/24 07:46, Alexei Filippov wrote:
>> Was added call backs for machine specific pmu events.
>> Simplify monitor functions by adding new hash table, which going to map
>> counter number and event index.
>> Was added read/write callbacks which going to simplify support for events,
>> which expected to have different behavior.
>>
>> Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
>> ---
>> Changes since v2:
>>         -rebased to latest master
>>   target/riscv/cpu.h |   9 +++
>>   target/riscv/csr.c |  43 +++++++++-----
>>   target/riscv/pmu.c | 139 ++++++++++++++++++++++-----------------------
>>   target/riscv/pmu.h |  11 ++--
>>   4 files changed, 115 insertions(+), 87 deletions(-)
>>
>> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
>> index 6fe0d712b4..fbf82b050b 100644
>> --- a/target/riscv/cpu.h
>> +++ b/target/riscv/cpu.h
>> @@ -374,6 +374,13 @@ struct CPUArchState {
>>       uint64_t (*rdtime_fn)(void *);
>>       void *rdtime_fn_arg;
>> +    /*machine specific pmu callback */
>> +    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
>> +                          target_ulong val, bool high_half);
>> +    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t event_idx,
>> +                                 bool high_half);
>> +    bool (*pmu_vendor_support)(uint32_t event_idx);
> 
> Do these really belong in CPUArchState, rather than RISCVCPUClass?
> 
> Surely there's more to this series, since these fields are never set...
> 
> 
> r~

Initially these callbacks were added to CPUArchState simply to mirror
the similar implementation used for rdtime_fn*.

Yes, you're right, there is more to this series, but it can't be
separated from the Syntacore-specific parts, which are unfortunately
not ready to be published yet. So I can prepare a second patch
implementing a PMU subsystem for the virt machine. What do you think
about it? (I'll send it in a few days.)
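
A rough sketch of the kind of wiring such a virt follow-up could add
(hypothetical only; vendor_pmu_* stand in for whatever callbacks the
machine actually provides):

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "target/riscv/cpu.h"

/*
 * Install the vendor PMU hooks on every hart at machine init time,
 * next to where rdtime_fn is already set up per hart.
 */
static void virt_install_vendor_pmu(MachineState *machine)
{
    int i;

    for (i = 0; i < machine->smp.cpus; i++) {
        CPURISCVState *env = &RISCV_CPU(qemu_get_cpu(i))->env;

        env->pmu_ctr_write = vendor_pmu_ctr_write;
        env->pmu_ctr_read = vendor_pmu_ctr_read;
        env->pmu_vendor_support = vendor_pmu_event_supported;
    }
}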

-- 
Sincerely,
Aleksei Filippov

Re: [PATCH v2] target/riscv: Add support for machine-specific PMU events
Posted by Philippe Mathieu-Daudé 4 months, 2 weeks ago
Hi Aleksei,

On 8/7/24 11:46, Aleksei Filippov wrote:
> On 25.06.2024 21:18, Richard Henderson wrote:
>> On 6/25/24 07:46, Alexei Filippov wrote:
>>> Was added call backs for machine specific pmu events.
>>> Simplify monitor functions by adding new hash table, which going to map
>>> counter number and event index.
>>> Was added read/write callbacks which going to simplify support for 
>>> events,
>>> which expected to have different behavior.
>>>
>>> Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
>>> ---
>>> Changes since v2:
>>>         -rebased to latest master
>>>   target/riscv/cpu.h |   9 +++
>>>   target/riscv/csr.c |  43 +++++++++-----
>>>   target/riscv/pmu.c | 139 ++++++++++++++++++++++-----------------------
>>>   target/riscv/pmu.h |  11 ++--
>>>   4 files changed, 115 insertions(+), 87 deletions(-)
>>>
>>> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
>>> index 6fe0d712b4..fbf82b050b 100644
>>> --- a/target/riscv/cpu.h
>>> +++ b/target/riscv/cpu.h
>>> @@ -374,6 +374,13 @@ struct CPUArchState {
>>>       uint64_t (*rdtime_fn)(void *);
>>>       void *rdtime_fn_arg;
>>> +    /*machine specific pmu callback */
>>> +    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
>>> +                          target_ulong val, bool high_half);
>>> +    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t 
>>> event_idx,
>>> +                                 bool high_half);
>>> +    bool (*pmu_vendor_support)(uint32_t event_idx);
>>
>> Do these really belong in CPUArchState, rather than RISCVCPUClass?
>>
>> Surely there's more to this series, since these fields are never set...
>>
>>
>> r~
> 
> Initially this callbacks was added to CPUArchState just to be along with 
> similar implementation with rdtime_fn*.
> 
> Yes, you're right, there are more series to this, but, it can't be 
> separated from syntacore specific parts, which is unfortunately not 
> ready yet to be published. So, I can prepare second patch to implement 
> PMU subsystem for virt device. What do you think about it? (I'll send it 
> in the few days).

How can we test your patch meanwhile?

Thanks,

Phil.


Re: [PATCH v2] target/riscv: Add support for machine-specific PMU events
Posted by Aleksei Filippov 2 months, 1 week ago

On 08.07.2024 16:42, Philippe Mathieu-Daudé wrote:
> Hi Aleksei,
> 
> On 8/7/24 11:46, Aleksei Filippov wrote:
>> On 25.06.2024 21:18, Richard Henderson wrote:
>>> On 6/25/24 07:46, Alexei Filippov wrote:
>>>> Was added call backs for machine specific pmu events.
>>>> Simplify monitor functions by adding new hash table, which going to map
>>>> counter number and event index.
>>>> Was added read/write callbacks which going to simplify support for events,
>>>> which expected to have different behavior.
>>>>
>>>> Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
>>>> ---
>>>> Changes since v2:
>>>>         -rebased to latest master
>>>>   target/riscv/cpu.h |   9 +++
>>>>   target/riscv/csr.c |  43 +++++++++-----
>>>>   target/riscv/pmu.c | 139 ++++++++++++++++++++++-----------------------
>>>>   target/riscv/pmu.h |  11 ++--
>>>>   4 files changed, 115 insertions(+), 87 deletions(-)
>>>>
>>>> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
>>>> index 6fe0d712b4..fbf82b050b 100644
>>>> --- a/target/riscv/cpu.h
>>>> +++ b/target/riscv/cpu.h
>>>> @@ -374,6 +374,13 @@ struct CPUArchState {
>>>>       uint64_t (*rdtime_fn)(void *);
>>>>       void *rdtime_fn_arg;
>>>> +    /*machine specific pmu callback */
>>>> +    void (*pmu_ctr_write)(PMUCTRState *counter, uint32_t event_idx,
>>>> +                          target_ulong val, bool high_half);
>>>> +    target_ulong (*pmu_ctr_read)(PMUCTRState *counter, uint32_t event_idx,
>>>> +                                 bool high_half);
>>>> +    bool (*pmu_vendor_support)(uint32_t event_idx);
>>>
>>> Do these really belong in CPUArchState, rather than RISCVCPUClass?
>>>
>>> Surely there's more to this series, since these fields are never set...
>>>
>>>
>>> r~
>>
>> Initially this callbacks was added to CPUArchState just to be along with 
>> similar implementation with rdtime_fn*.
>>
>> Yes, you're right, there are more series to this, but, it can't be separated 
>> from syntacore specific parts, which is unfortunately not ready yet to be 
>> published. So, I can prepare second patch to implement PMU subsystem for virt 
>> device. What do you think about it? (I'll send it in the few days).
> 
> How can we test your patch meanwhile?
Sorry for the late response. I have created an RFC with a PoC and a
description of how I tested this, and resent the patch together with
the PoC:
https://lore.kernel.org/all/20240910174747.148141-1-alexei.filippov@syntacore.com/

> 
> Thanks,
> 
> Phil.
> 

-- 
Sincerely,
Aleksei Filippov