Macro GLOBAL_CTRL_EN_PERF_METRICS is defined as 48 instead of
BIT_ULL(48), which is inconsistent with other similar macros. As a
result, the macro is easily misused, since users assume it is a
bit-mask just like the other similar macros.

Thus change GLOBAL_CTRL_EN_PERF_METRICS to BIT_ULL(48) to eliminate
this potential misuse.
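
A minimal, self-contained sketch (not part of this patch; the OLD_/NEW_
macro names and the main() harness are illustrative only) of the failure
mode: with the raw-position definition, the usual "|= MASK" idiom sets
bits 4 and 5 (48 == 0x30) instead of bit 48, while the BIT_ULL(48)
definition behaves as every other mask macro does.

  #include <stdio.h>
  #include <stdint.h>

  #define BIT_ULL(n)  (1ULL << (n))

  #define OLD_GLOBAL_CTRL_EN_PERF_METRICS  48            /* bit position */
  #define NEW_GLOBAL_CTRL_EN_PERF_METRICS  BIT_ULL(48)   /* bit mask     */

  int main(void)
  {
          uint64_t ctrl = 0;

          /* Looks like every other "|= MASK" user, but sets 0x30. */
          ctrl |= OLD_GLOBAL_CTRL_EN_PERF_METRICS;
          printf("position macro misused: 0x%016llx\n",
                 (unsigned long long)ctrl);

          /* With the mask definition the same idiom sets bit 48. */
          ctrl = 0;
          ctrl |= NEW_GLOBAL_CTRL_EN_PERF_METRICS;
          printf("mask definition:        0x%016llx\n",
                 (unsigned long long)ctrl);

          return 0;
  }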
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
---
arch/x86/events/intel/core.c | 8 ++++----
arch/x86/include/asm/perf_event.h | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 15da60cf69f2..f88a99d8d125 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5319,9 +5319,9 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
if (pmu->intel_cap.perf_metrics)
- pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
else
- pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
intel_pmu_check_event_constraints(pmu->event_constraints,
pmu->cntr_mask64,
@@ -5456,7 +5456,7 @@ static void intel_pmu_cpu_starting(int cpu)
rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
if (!perf_cap.perf_metrics) {
x86_pmu.intel_cap.perf_metrics = 0;
- x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
}
}
@@ -7790,7 +7790,7 @@ __init int intel_pmu_init(void)
}
if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
- x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
if (x86_pmu.intel_cap.pebs_timing_info)
x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 70d1d94aca7e..f8247ac276c4 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -430,7 +430,7 @@ static inline bool is_topdown_idx(int idx)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48
-#define GLOBAL_CTRL_EN_PERF_METRICS 48
+#define GLOBAL_CTRL_EN_PERF_METRICS BIT_ULL(48)
/*
* We model guest LBR event tracing as another fixed-mode PMC like BTS.
*
--
2.34.1
"Tip bot" Just the tip. Wonder if this was sent to "menglongdong" (a kernel programmer) On Tue, Aug 19, 2025 at 10:33 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote: > > Macro GLOBAL_CTRL_EN_PERF_METRICS is defined to 48 instead of > BIT_ULL(48), it's inconsistent with other similar macros. This leads to > this macro is quite easily used wrongly since users thinks it's a > bit-mask just like other similar macros. > > Thus change GLOBAL_CTRL_EN_PERF_METRICS to BIT_ULL(48) and eliminate > this potential misuse. > > Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> > Tested-by: Yi Lai <yi1.lai@intel.com> > --- > arch/x86/events/intel/core.c | 8 ++++---- > arch/x86/include/asm/perf_event.h | 2 +- > 2 files changed, 5 insertions(+), 5 deletions(-) > > diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c > index 15da60cf69f2..f88a99d8d125 100644 > --- a/arch/x86/events/intel/core.c > +++ b/arch/x86/events/intel/core.c > @@ -5319,9 +5319,9 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) > 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); > > if (pmu->intel_cap.perf_metrics) > - pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; > + pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; > else > - pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); > + pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; > > intel_pmu_check_event_constraints(pmu->event_constraints, > pmu->cntr_mask64, > @@ -5456,7 +5456,7 @@ static void intel_pmu_cpu_starting(int cpu) > rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); > if (!perf_cap.perf_metrics) { > x86_pmu.intel_cap.perf_metrics = 0; > - x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); > + x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; > } > } > > @@ -7790,7 +7790,7 @@ __init int intel_pmu_init(void) > } > > if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) > - x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; > + x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; > > if (x86_pmu.intel_cap.pebs_timing_info) > x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; > diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h > index 70d1d94aca7e..f8247ac276c4 100644 > --- a/arch/x86/include/asm/perf_event.h > +++ b/arch/x86/include/asm/perf_event.h > @@ -430,7 +430,7 @@ static inline bool is_topdown_idx(int idx) > #define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT) > #define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48 > > -#define GLOBAL_CTRL_EN_PERF_METRICS 48 > +#define GLOBAL_CTRL_EN_PERF_METRICS BIT_ULL(48) > /* > * We model guest LBR event tracing as another fixed-mode PMC like BTS. > * > -- > 2.34.1 > >
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 9b3e119784bc3671fde5043001a5c9a607c7d920
Gitweb: https://git.kernel.org/tip/9b3e119784bc3671fde5043001a5c9a607c7d920
Author: Dapeng Mi <dapeng1.mi@linux.intel.com>
AuthorDate: Wed, 20 Aug 2025 10:30:30 +08:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 21 Aug 2025 20:09:27 +02:00
perf/x86/intel: Change macro GLOBAL_CTRL_EN_PERF_METRICS to BIT_ULL(48)
Macro GLOBAL_CTRL_EN_PERF_METRICS is defined as 48 instead of
BIT_ULL(48), which is inconsistent with other similar macros. As a
result, the macro is easily misused, since users assume it is a
bit-mask just like the other similar macros.

Thus change GLOBAL_CTRL_EN_PERF_METRICS to BIT_ULL(48) to eliminate
this potential misuse.
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
Link: https://lore.kernel.org/r/20250820023032.17128-6-dapeng1.mi@linux.intel.com
---
arch/x86/events/intel/core.c | 8 ++++----
arch/x86/include/asm/perf_event.h | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 15da60c..f88a99d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5319,9 +5319,9 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
if (pmu->intel_cap.perf_metrics)
- pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
else
- pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
intel_pmu_check_event_constraints(pmu->event_constraints,
pmu->cntr_mask64,
@@ -5456,7 +5456,7 @@ static void intel_pmu_cpu_starting(int cpu)
rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
if (!perf_cap.perf_metrics) {
x86_pmu.intel_cap.perf_metrics = 0;
- x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
}
}
@@ -7790,7 +7790,7 @@ __init int intel_pmu_init(void)
}
if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
- x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
if (x86_pmu.intel_cap.pebs_timing_info)
x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 70d1d94..f8247ac 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -430,7 +430,7 @@ static inline bool is_topdown_idx(int idx)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48
-#define GLOBAL_CTRL_EN_PERF_METRICS 48
+#define GLOBAL_CTRL_EN_PERF_METRICS BIT_ULL(48)
/*
* We model guest LBR event tracing as another fixed-mode PMC like BTS.
*