arch-PEBS provides CPUID leaves to enumerate which counters support PEBS
sampling and precise-distribution PEBS sampling. Thus PEBS constraints
should be configured dynamically based on these counter and precise
distribution bitmaps instead of being defined statically.

Update the event's dyn_constraint based on the PEBS event's precise level.
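For illustration, a minimal userspace sketch of the mask selection this
change performs (the struct layout is simplified and
narrow_pebs_constraint() is a hypothetical helper, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the per-PMU arch-PEBS capability:
	 * bit N set means GP counter N supports the capability.
	 */
	struct arch_pebs_cap {
		uint64_t counters;	/* counters supporting PEBS sampling */
		uint64_t pdists;	/* counters supporting precise distribution */
	};

	/* Mirror the hw_config logic: precise_ip >= 3 requests precise
	 * distribution, so only pdist-capable counters stay in the dynamic
	 * constraint; otherwise any PEBS-capable counter is eligible.
	 */
	static uint64_t narrow_pebs_constraint(uint64_t dyn_constraint,
					       struct arch_pebs_cap cap,
					       unsigned int precise_ip)
	{
		uint64_t pebs_mask = precise_ip >= 3 ? cap.pdists : cap.counters;

		return dyn_constraint & pebs_mask;
	}

	int main(void)
	{
		/* Example: 8 PEBS-capable counters, counters 0-3 pdist-capable. */
		struct arch_pebs_cap cap = { .counters = 0xff, .pdists = 0x0f };

		printf("precise_ip=1 -> 0x%llx\n",
		       (unsigned long long)narrow_pebs_constraint(~0ULL, cap, 1));
		printf("precise_ip=3 -> 0x%llx\n",
		       (unsigned long long)narrow_pebs_constraint(~0ULL, cap, 3));
		return 0;
	}

With the example bitmaps above this prints 0xff for precise_ip=1 and 0xf
for precise_ip=3, matching how the hunk below narrows
event->hw.dyn_constraint.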
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 arch/x86/events/intel/core.c | 11 +++++++++++
 arch/x86/events/intel/ds.c   |  1 +
 2 files changed, 12 insertions(+)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 41c4af6bd62c..818c197585c6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4252,6 +4252,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	}
 
 	if (event->attr.precise_ip) {
+		struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
+
 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
 			return -EINVAL;
 
@@ -4265,6 +4267,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		}
 		if (x86_pmu.pebs_aliases)
 			x86_pmu.pebs_aliases(event);
+
+		if (x86_pmu.arch_pebs) {
+			u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
+					~GLOBAL_CTRL_EN_PERF_METRICS;
+			u64 pebs_mask = event->attr.precise_ip >= 3 ?
+					pebs_cap.pdists : pebs_cap.counters;
+			if (cntr_mask != pebs_mask)
+				event->hw.dyn_constraint &= pebs_mask;
+		}
 	}
 
 	if (needs_branch_stack(event)) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index ed9bce51182d..4217d0a5d584 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -3022,6 +3022,7 @@ static void __init intel_arch_pebs_init(void)
 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
 	x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs;
 	x86_pmu.pebs_capable = ~0ULL;
+	x86_pmu.flags |= PMU_FL_PEBS_ALL;
 
 	x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
 	x86_pmu.pebs_disable = __intel_pmu_pebs_disable;
--
2.34.1