Replace the hard-coded ref-cycles event index with a named enum
constant. If a new event is ever added to intel_gp_events[], the
ref-cycles entry may shift, and a bare hard-coded index is easy to
forget to update. A constant defined alongside the other event
indexes removes that risk.
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
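Note for reviewers: a minimal sketch of the pattern this patch
applies, using a hypothetical table and values rather than the actual
intel_gp_events[] layout. Indexing the table through an enum constant
declared next to the other indexes keeps the index and its users in
sync.

    /* Hypothetical event table, for illustration only. */
    struct event {
            const char *name;
            unsigned int unit_sel;
    };

    enum {
            REF_CYCLES_IDX = 2,     /* must match events[] below */
    };

    static const struct event events[] = {
            { "core cycles",  0x003c },
            { "instructions", 0x00c0 },
            { "ref cycles",   0x013c },
    };

    /*
     * events[REF_CYCLES_IDX].unit_sel replaces a bare events[2].unit_sel;
     * if an entry is inserted above ref cycles, only the enum needs
     * updating, in one place.
     */
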
x86/pmu.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/x86/pmu.c b/x86/pmu.c
index 496ee877..523369b2 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -55,6 +55,7 @@ struct pmu_event {
* intel_gp_events[].
*/
enum {
+ INTEL_REF_CYCLES_IDX = 2,
INTEL_BRANCHES_IDX = 5,
};
@@ -708,7 +709,8 @@ static void set_ref_cycle_expectations(void)
{
pmu_counter_t cnt = {
.ctr = MSR_IA32_PERFCTR0,
- .config = EVNTSEL_OS | EVNTSEL_USR | intel_gp_events[2].unit_sel,
+ .config = EVNTSEL_OS | EVNTSEL_USR |
+ intel_gp_events[INTEL_REF_CYCLES_IDX].unit_sel,
};
uint64_t tsc_delta;
uint64_t t0, t1, t2, t3;
@@ -744,8 +746,10 @@ static void set_ref_cycle_expectations(void)
if (!tsc_delta)
return;
- intel_gp_events[2].min = (intel_gp_events[2].min * cnt.count) / tsc_delta;
- intel_gp_events[2].max = (intel_gp_events[2].max * cnt.count) / tsc_delta;
+ intel_gp_events[INTEL_REF_CYCLES_IDX].min =
+ (intel_gp_events[INTEL_REF_CYCLES_IDX].min * cnt.count) / tsc_delta;
+ intel_gp_events[INTEL_REF_CYCLES_IDX].max =
+ (intel_gp_events[INTEL_REF_CYCLES_IDX].max * cnt.count) / tsc_delta;
}
static void check_invalid_rdpmc_gp(void)
--
2.40.1