Use enum constants to replace these attr_rdpmc magic numbers, so that
their meaning is clear to readers.
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/core.c | 7 ++++---
arch/x86/events/intel/p6.c | 2 +-
arch/x86/events/perf_event.h | 7 +++++++
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5d0d5e466c62..3d9cc1d7fcfa 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+ /* enable userspace RDPMC usage by default */
+ x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
*/
if (val == 0)
static_branch_inc(&rdpmc_never_available_key);
- else if (x86_pmu.attr_rdpmc == 0)
+ else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
static_branch_dec(&rdpmc_never_available_key);
if (val == 2)
static_branch_inc(&rdpmc_always_available_key);
- else if (x86_pmu.attr_rdpmc == 2)
+ else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
static_branch_dec(&rdpmc_always_available_key);
on_each_cpu(cr4_update_pce, NULL, 1);
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index 6e41de355bd8..fb991e0ac614 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
*/
pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
x86_pmu.attr_rdpmc_broken = 1;
- x86_pmu.attr_rdpmc = 0;
+ x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
}
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7caabc5d487..24a81d2916e9 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -187,6 +187,13 @@ struct amd_nb {
(1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15))
+/* user space rdpmc control values */
+enum {
+ X86_USER_RDPMC_NEVER_ENABLE = 0,
+ X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
+ X86_USER_RDPMC_ALWAYS_ENABLE = 2,
+};
+
/*
* Per register state.
*/
--
2.34.1
On Wed, Nov 19, 2025 at 9:37 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>
> Use macros to replace these attr_rdpmc magic numbers, so users are easy
> to know their meaning.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
I'm reminded that we were having issues with rdpmc on hybrid:
https://lore.kernel.org/lkml/20250614004528.1652860-1-irogers@google.com/
like the enable/disable rdpmc flag being shared across the cpu_core
and cpu_atom PMUs, and needing to force the thread doing the rdpmc to
have affinity matching the CPUs of the PMU it is reading from, as
otherwise things like struct perf_event_mmap_page's index could do
interesting things:
https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/tests/mmap-basic.c?h=perf-tools-next#n208
Others required the rdpmc to be in a restartable sequence and Peter
proposed fixing this in the kernel:
https://lore.kernel.org/linux-perf-users/20250618084522.GE1613376@noisy.programming.kicks-ass.net/
Also looks like we never merged fixing the documentation:
https://lore.kernel.org/lkml/20220817174909.877139-1-irogers@google.com/
Not specifically a DMR issue but this is reminding me of a bunch of
tech debt - I also wonder if this patch requires the test being
updated.
Thanks,
Ian
> ---
> arch/x86/events/core.c | 7 ++++---
> arch/x86/events/intel/p6.c | 2 +-
> arch/x86/events/perf_event.h | 7 +++++++
> 3 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 5d0d5e466c62..3d9cc1d7fcfa 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
>
> pr_cont("%s PMU driver.\n", x86_pmu.name);
>
> - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
> + /* enable userspace RDPMC usage by default */
> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
>
> for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
> quirk->func();
> @@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
> */
> if (val == 0)
> static_branch_inc(&rdpmc_never_available_key);
> - else if (x86_pmu.attr_rdpmc == 0)
> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
> static_branch_dec(&rdpmc_never_available_key);
>
> if (val == 2)
> static_branch_inc(&rdpmc_always_available_key);
> - else if (x86_pmu.attr_rdpmc == 2)
> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
> static_branch_dec(&rdpmc_always_available_key);
>
> on_each_cpu(cr4_update_pce, NULL, 1);
> diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
> index 6e41de355bd8..fb991e0ac614 100644
> --- a/arch/x86/events/intel/p6.c
> +++ b/arch/x86/events/intel/p6.c
> @@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
> */
> pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
> x86_pmu.attr_rdpmc_broken = 1;
> - x86_pmu.attr_rdpmc = 0;
> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
> }
> }
>
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index f7caabc5d487..24a81d2916e9 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -187,6 +187,13 @@ struct amd_nb {
> (1ULL << PERF_REG_X86_R14) | \
> (1ULL << PERF_REG_X86_R15))
>
> +/* user space rdpmc control values */
> +enum {
> + X86_USER_RDPMC_NEVER_ENABLE = 0,
> + X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
> + X86_USER_RDPMC_ALWAYS_ENABLE = 2,
> +};
> +
> /*
> * Per register state.
> */
> --
> 2.34.1
>
On 11/20/2025 2:19 PM, Ian Rogers wrote:
> On Wed, Nov 19, 2025 at 9:37 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>> Use macros to replace these attr_rdpmc magic numbers, so users are easy
>> to know their meaning.
>>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> I'm reminded that we were having issues with rdpmc on hybrid:
> https://lore.kernel.org/lkml/20250614004528.1652860-1-irogers@google.com/
> like the enable/disable rdpmc flag being shared across the cpu_core
> and cpu_atom PMUs, and needing to force the thread doing the rdpmc to
> have affinity matching the CPUs of the PMU it is reading from, as
> otherwise things like struct perf_event_mmap_page's index could do
> interesting things:
> https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/tests/mmap-basic.c?h=perf-tools-next#n208
> Others required the rdpmc to be in a restartable sequence and Peter
> proposed fixing this in the kernel:
> https://lore.kernel.org/linux-perf-users/20250618084522.GE1613376@noisy.programming.kicks-ass.net/
>
> Also looks like we never merged fixing the documentation:
> https://lore.kernel.org/lkml/20220817174909.877139-1-irogers@google.com/
>
> Not specifically a DMR issue but this is reminding me of a bunch of
> tech debt - I also wonder if this patch requires the test being
> updated.
Thanks for the reminder. This patch doesn't involve any functional change to
the "rdpmc" attribute, but along with the introduction of the per-counter
"rdpmc user disable" in patch 7/7, the rdpmc test can definitely be
enhanced. I will enhance the rdpmc test in the next version.
>
> Thanks,
> Ian
>
>> ---
>> arch/x86/events/core.c | 7 ++++---
>> arch/x86/events/intel/p6.c | 2 +-
>> arch/x86/events/perf_event.h | 7 +++++++
>> 3 files changed, 12 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
>> index 5d0d5e466c62..3d9cc1d7fcfa 100644
>> --- a/arch/x86/events/core.c
>> +++ b/arch/x86/events/core.c
>> @@ -2129,7 +2129,8 @@ static int __init init_hw_perf_events(void)
>>
>> pr_cont("%s PMU driver.\n", x86_pmu.name);
>>
>> - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
>> + /* enable userspace RDPMC usage by default */
>> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_CONDITIONAL_ENABLE;
>>
>> for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
>> quirk->func();
>> @@ -2609,12 +2610,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
>> */
>> if (val == 0)
>> static_branch_inc(&rdpmc_never_available_key);
>> - else if (x86_pmu.attr_rdpmc == 0)
>> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_NEVER_ENABLE)
>> static_branch_dec(&rdpmc_never_available_key);
>>
>> if (val == 2)
>> static_branch_inc(&rdpmc_always_available_key);
>> - else if (x86_pmu.attr_rdpmc == 2)
>> + else if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE)
>> static_branch_dec(&rdpmc_always_available_key);
>>
>> on_each_cpu(cr4_update_pce, NULL, 1);
>> diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
>> index 6e41de355bd8..fb991e0ac614 100644
>> --- a/arch/x86/events/intel/p6.c
>> +++ b/arch/x86/events/intel/p6.c
>> @@ -243,7 +243,7 @@ static __init void p6_pmu_rdpmc_quirk(void)
>> */
>> pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
>> x86_pmu.attr_rdpmc_broken = 1;
>> - x86_pmu.attr_rdpmc = 0;
>> + x86_pmu.attr_rdpmc = X86_USER_RDPMC_NEVER_ENABLE;
>> }
>> }
>>
>> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
>> index f7caabc5d487..24a81d2916e9 100644
>> --- a/arch/x86/events/perf_event.h
>> +++ b/arch/x86/events/perf_event.h
>> @@ -187,6 +187,13 @@ struct amd_nb {
>> (1ULL << PERF_REG_X86_R14) | \
>> (1ULL << PERF_REG_X86_R15))
>>
>> +/* user space rdpmc control values */
>> +enum {
>> + X86_USER_RDPMC_NEVER_ENABLE = 0,
>> + X86_USER_RDPMC_CONDITIONAL_ENABLE = 1,
>> + X86_USER_RDPMC_ALWAYS_ENABLE = 2,
>> +};
>> +
>> /*
>> * Per register state.
>> */
>> --
>> 2.34.1
>>
© 2016 - 2025 Red Hat, Inc.