[tip: perf/core] perf/x86/intel: Optimize PEBS extended config

The following commit has been merged into the perf/core branch of tip:

Commit-ID:     2093d8cf80fa5552d1025a78a8f3a10bf3b6466e
Gitweb:        https://git.kernel.org/tip/2093d8cf80fa5552d1025a78a8f3a10bf3b6466e
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Fri, 07 Nov 2025 14:50:20 +01:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 07 Nov 2025 15:08:23 +01:00

perf/x86/intel: Optimize PEBS extended config

Similar to intel_pmu_enable_acr_event(), convert the PEBS extended-config
enable/disable helpers to static calls, avoiding the !x86_pmu.arch_pebs
branch in the event enable/disable paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/events/intel/core.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
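
For context, the patch leans on the kernel's static-call machinery:
DEFINE_STATIC_CALL_NULL() declares a static call whose default target is
NULL, static_call_cond() makes the call site a patched-out NOP (on x86)
while the target is NULL, and static_call_update() installs the real
function once the feature is known to be present. The sketch below shows
the idiom in isolation; every name in it (my_feature, my_feature_handler,
feature_present, hot_path, my_driver_init) is illustrative, not from the
patch:

  #include <linux/init.h>
  #include <linux/static_call.h>

  /* Stands in for a hardware-capability check like x86_pmu.arch_pebs. */
  static bool feature_present(void)
  {
  	return false;
  }

  static void my_feature_handler(int arg)
  {
  	/* Work that is only needed when the feature exists. */
  }

  /* Default target is NULL, so the call site starts life as a NOP. */
  DEFINE_STATIC_CALL_NULL(my_feature, my_feature_handler);

  static void hot_path(int arg)
  {
  	/* NOP until updated, a direct call afterwards; no branch. */
  	static_call_cond(my_feature)(arg);
  }

  static int __init my_driver_init(void)
  {
  	/* Decide once at boot instead of on every hot_path() call. */
  	if (feature_present())
  		static_call_update(my_feature, my_feature_handler);

  	hot_path(0);
  	return 0;
  }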

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a421595..aad89c9 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2582,9 +2582,6 @@ static inline void __intel_pmu_update_event_ext(int idx, u64 ext)
 
 static void intel_pmu_disable_event_ext(struct perf_event *event)
 {
-	if (!x86_pmu.arch_pebs)
-		return;
-
 	/*
 	 * Only clear CFG_C MSR for PEBS counter group events,
 	 * it avoids the HW counter's value to be added into
@@ -2602,6 +2599,8 @@ static void intel_pmu_disable_event_ext(struct perf_event *event)
 	__intel_pmu_update_event_ext(event->hw.idx, 0);
 }
 
+DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext);
+
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -2610,11 +2609,11 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	switch (idx) {
 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
 		intel_clear_masks(event, idx);
-		intel_pmu_disable_event_ext(event);
+		static_call_cond(intel_pmu_disable_event_ext)(event);
 		x86_pmu_disable_event(event);
 		break;
 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
-		intel_pmu_disable_event_ext(event);
+		static_call_cond(intel_pmu_disable_event_ext)(event);
 		fallthrough;
 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
 		intel_pmu_disable_fixed(event);
@@ -2990,9 +2989,6 @@ static void intel_pmu_enable_event_ext(struct perf_event *event)
 	struct arch_pebs_cap cap;
 	u64 ext = 0;
 
-	if (!x86_pmu.arch_pebs)
-		return;
-
 	cap = hybrid(cpuc->pmu, arch_pebs_cap);
 
 	if (event->attr.precise_ip) {
@@ -3056,6 +3052,8 @@ static void intel_pmu_enable_event_ext(struct perf_event *event)
 		__intel_pmu_update_event_ext(hwc->idx, ext);
 }
 
+DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext);
+
 static void intel_pmu_enable_event(struct perf_event *event)
 {
 	u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
@@ -3071,12 +3069,12 @@ static void intel_pmu_enable_event(struct perf_event *event)
 			enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
 		intel_set_masks(event, idx);
 		static_call_cond(intel_pmu_enable_acr_event)(event);
-		intel_pmu_enable_event_ext(event);
+		static_call_cond(intel_pmu_enable_event_ext)(event);
 		__x86_pmu_enable_event(hwc, enable_mask);
 		break;
 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
 		static_call_cond(intel_pmu_enable_acr_event)(event);
-		intel_pmu_enable_event_ext(event);
+		static_call_cond(intel_pmu_enable_event_ext)(event);
 		fallthrough;
 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
 		intel_pmu_enable_fixed(event);
@@ -8106,8 +8104,13 @@ __init int intel_pmu_init(void)
 	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
 		update_pmu_cap(NULL);
 
-	if (x86_pmu.arch_pebs)
+	if (x86_pmu.arch_pebs) {
+		static_call_update(intel_pmu_disable_event_ext,
+				   intel_pmu_disable_event_ext);
+		static_call_update(intel_pmu_enable_event_ext,
+				   intel_pmu_enable_event_ext);
 		pr_cont("Architectural PEBS, ");
+	}
 
 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
 				      &x86_pmu.fixed_cntr_mask64,
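
With the NULL-default static calls in place, the x86_pmu.arch_pebs test is
made once in intel_pmu_init() rather than on every event enable/disable:
on hardware without architectural PEBS the _ext call sites remain NOPs,
and on hardware with it they become direct calls to the _ext helpers.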