The following bug was triggered on a system built with
CONFIG_DEBUG_PREEMPT=y:
# echo p > /proc/sysrq-trigger
BUG: using smp_processor_id() in preemptible [00000000] code: sh/117
caller is perf_event_print_debug+0x1a/0x4c0
CPU: 3 UID: 0 PID: 117 Comm: sh Not tainted 6.11.0-rc1 #109
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
Call Trace:
<TASK>
dump_stack_lvl+0x4f/0x60
check_preemption_disabled+0xc8/0xd0
perf_event_print_debug+0x1a/0x4c0
__handle_sysrq+0x140/0x180
write_sysrq_trigger+0x61/0x70
proc_reg_write+0x4e/0x70
vfs_write+0xd0/0x430
? handle_mm_fault+0xc8/0x240
ksys_write+0x9c/0xd0
do_syscall_64+0x96/0x190
entry_SYSCALL_64_after_hwframe+0x4b/0x53
This is because commit d4b294bf84db ("perf/x86: Hybrid PMU support
for counters") moved the smp_processor_id() call outside of the
IRQ-disabled critical section. If preemption occurs in
perf_event_print_debug() and the task is migrated to another CPU, the
printed debug information may not match the CPU it was collected from.
Move smp_processor_id() back inside the IRQ-disabled critical section
to fix this issue.
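
For illustration, the racy window looks roughly like this:

	int cpu = smp_processor_id();	/* still preemptible here */
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	/* the task may be migrated to another CPU at this point */
	local_irq_save(flags);
	/* later rdmsrl() calls read the current CPU's MSRs, while
	 * "cpu" and "cpuc" still refer to the CPU the task left */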
Fixes: d4b294bf84db ("perf/x86: Hybrid PMU support for counters")
Signed-off-by: Li Huafei <lihuafei1@huawei.com>
---
arch/x86/events/core.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 12f2a0c14d33..c0157a5d8296 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1521,19 +1521,24 @@ void perf_event_print_debug(void)
 {
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
 	u64 pebs, debugctl;
-	int cpu = smp_processor_id();
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
-	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
-	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
+	int cpu;
+	struct cpu_hw_events *cpuc;
+	unsigned long *cntr_mask, *fixed_cntr_mask;
+	struct event_constraint *pebs_constraints;
 	unsigned long flags;
 	int idx;
 
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_events, cpu);
+	cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+	fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
+	pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
+
 	if (!*(u64 *)cntr_mask)
 		return;
 
-	local_irq_save(flags);
-
 	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
--
2.25.1
Hello Huafei,
On 7/29/2024 5:15 PM, Li Huafei wrote:
> The following bug was triggered on a system built with
> CONFIG_DEBUG_PREEMPT=y:
>
> # echo p > /proc/sysrq-trigger
>
> BUG: using smp_processor_id() in preemptible [00000000] code: sh/117
> caller is perf_event_print_debug+0x1a/0x4c0
> CPU: 3 UID: 0 PID: 117 Comm: sh Not tainted 6.11.0-rc1 #109
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
> Call Trace:
> <TASK>
> dump_stack_lvl+0x4f/0x60
> check_preemption_disabled+0xc8/0xd0
> perf_event_print_debug+0x1a/0x4c0
> __handle_sysrq+0x140/0x180
> write_sysrq_trigger+0x61/0x70
> proc_reg_write+0x4e/0x70
> vfs_write+0xd0/0x430
> ? handle_mm_fault+0xc8/0x240
> ksys_write+0x9c/0xd0
> do_syscall_64+0x96/0x190
> entry_SYSCALL_64_after_hwframe+0x4b/0x53
>
> This is because commit d4b294bf84db ("perf/x86: Hybrid PMU support
> for counters") moved the smp_processor_id() call outside of the
> IRQ-disabled critical section. If preemption occurs in
> perf_event_print_debug() and the task is migrated to another CPU, the
> printed debug information may not match the CPU it was collected from.
> Move smp_processor_id() back inside the IRQ-disabled critical section
> to fix this issue.
>
> Fixes: d4b294bf84db ("perf/x86: Hybrid PMU support for counters")
> Signed-off-by: Li Huafei <lihuafei1@huawei.com>
> ---
> arch/x86/events/core.c | 19 ++++++++++++-------
> 1 file changed, 12 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 12f2a0c14d33..c0157a5d8296 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -1521,19 +1521,24 @@ void perf_event_print_debug(void)
> {
> u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
> u64 pebs, debugctl;
> - int cpu = smp_processor_id();
> - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> - unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
> - unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
> - struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
> + int cpu;
> + struct cpu_hw_events *cpuc;
> + unsigned long *cntr_mask, *fixed_cntr_mask;
> + struct event_constraint *pebs_constraints;
> unsigned long flags;
> int idx;
nit. "cpu" and "idx" can be defined together and this can be converted
to use reverse xmas tree order.
>
> + local_irq_save(flags);
Perhaps use "guard(irqsave)();" here since ...
> +
> + cpu = smp_processor_id();
> + cpuc = &per_cpu(cpu_hw_events, cpu);
> + cntr_mask = hybrid(cpuc->pmu, cntr_mask);
> + fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
> + pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
> +
> if (!*(u64 *)cntr_mask)
... a "local_irq_restore(flags)" is now required here before returning,
and using the guard avoids that. Even the "flags" variable would no
longer be necessary.
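Something like this (an untested sketch):

	guard(irqsave)();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);
	...

	if (!*(u64 *)cntr_mask)
		return;	/* IRQs are restored automatically by the guard */

and then the explicit local_irq_restore() at the end of the function
can go away too.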
Thoughts?
> return;
>
> - local_irq_save(flags);
> -
> if (x86_pmu.version >= 2) {
> rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
> rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
--
Thanks and Regards,
Prateek
On 2024/7/29 17:13, K Prateek Nayak wrote:
> Hello Huafei,
>
> On 7/29/2024 5:15 PM, Li Huafei wrote:
>> The following bug was triggered on a system built with
>> CONFIG_DEBUG_PREEMPT=y:
>>
>> # echo p > /proc/sysrq-trigger
>>
>> BUG: using smp_processor_id() in preemptible [00000000] code: sh/117
>> caller is perf_event_print_debug+0x1a/0x4c0
>> CPU: 3 UID: 0 PID: 117 Comm: sh Not tainted 6.11.0-rc1 #109
>> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
>> Call Trace:
>> <TASK>
>> dump_stack_lvl+0x4f/0x60
>> check_preemption_disabled+0xc8/0xd0
>> perf_event_print_debug+0x1a/0x4c0
>> __handle_sysrq+0x140/0x180
>> write_sysrq_trigger+0x61/0x70
>> proc_reg_write+0x4e/0x70
>> vfs_write+0xd0/0x430
>> ? handle_mm_fault+0xc8/0x240
>> ksys_write+0x9c/0xd0
>> do_syscall_64+0x96/0x190
>> entry_SYSCALL_64_after_hwframe+0x4b/0x53
>>
>> This is because commit d4b294bf84db ("perf/x86: Hybrid PMU support
>> for counters") moved the smp_processor_id() call outside of the
>> IRQ-disabled critical section. If preemption occurs in
>> perf_event_print_debug() and the task is migrated to another CPU, the
>> printed debug information may not match the CPU it was collected from.
>> Move smp_processor_id() back inside the IRQ-disabled critical section
>> to fix this issue.
>>
>> Fixes: d4b294bf84db ("perf/x86: Hybrid PMU support for counters")
>> Signed-off-by: Li Huafei <lihuafei1@huawei.com>
>> ---
>> arch/x86/events/core.c | 19 ++++++++++++-------
>> 1 file changed, 12 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
>> index 12f2a0c14d33..c0157a5d8296 100644
>> --- a/arch/x86/events/core.c
>> +++ b/arch/x86/events/core.c
>> @@ -1521,19 +1521,24 @@ void perf_event_print_debug(void)
>> {
>> u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
>> u64 pebs, debugctl;
>> - int cpu = smp_processor_id();
>> - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
>> - unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
>> - unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
>> - struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
>> + int cpu;
>> + struct cpu_hw_events *cpuc;
>> + unsigned long *cntr_mask, *fixed_cntr_mask;
>> + struct event_constraint *pebs_constraints;
>> unsigned long flags;
>> int idx;
>
> nit. "cpu" and "idx" can be defined together and this can be converted
> to use reverse xmas tree order.
>
Ok, I'll change it in v2.
>> + local_irq_save(flags);
>
> Perhaps use "guard(irqsave)();" here since ...
>
>> +
>> + cpu = smp_processor_id();
>> + cpuc = &per_cpu(cpu_hw_events, cpu);
>> + cntr_mask = hybrid(cpuc->pmu, cntr_mask);
>> + fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
>> + pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
>> +
>> if (!*(u64 *)cntr_mask)
>
> ... a "local_irq_restore(flags)" is now required here before returning,
> and using the guard avoids that. Even the "flags" variable would no
> longer be necessary.
>
That's my mistake, I forgot to restore IRQs before returning. Using the
guard can indeed avoid such errors; I will fix it in v2. Thanks!
> Thoughts?
>
>> return;
>> - local_irq_save(flags);
>> -
>> if (x86_pmu.version >= 2) {
>> rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
>> rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
>