arch/x86/events/intel/bts.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-)
From: Li RongQing <lirongqing@baidu.com>
bts_ctx might not be allocated, for example if the CPU has X86_FEATURE_PTI,
but intel_bts_disable/enable_local() and intel_bts_interrupt() are called
unconditionally from intel_pmu_handle_irq() and crash on accessing
bts_ctx.
So check if bts_ctx is allocated when calling BTS functions.
Fixes: 3acfcefa795c ("perf/x86/intel/bts: Allocate bts_ctx only if necessary")
Reported-by: Jiri Olsa <olsajiri@gmail.com>
Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
arch/x86/events/intel/bts.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 8e09319..e8b3e7b 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -338,9 +338,14 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- int state = READ_ONCE(bts->state);
+ struct bts_ctx *bts;
+ int state;
+ if (!bts_ctx)
+ return;
+
+ bts = this_cpu_ptr(bts_ctx);
+ state = READ_ONCE(bts->state);
/*
* Here we transition from INACTIVE to ACTIVE;
* if we instead are STOPPED from the interrupt handler,
@@ -358,7 +363,12 @@ void intel_bts_enable_local(void)
void intel_bts_disable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
+ struct bts_ctx *bts;
+
+ if (!bts_ctx)
+ return;
+
+ bts = this_cpu_ptr(bts_ctx);
/*
* Here we transition from ACTIVE to INACTIVE;
@@ -450,12 +460,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void)
{
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- struct perf_event *event = bts->handle.event;
+ struct bts_ctx *bts;
+ struct perf_event *event;
struct bts_buffer *buf;
s64 old_head;
int err = -ENOSPC, handled = 0;
+ if (!bts_ctx)
+ return 0;
+
+ bts = this_cpu_ptr(bts_ctx);
+ event = bts->handle.event;
/*
* The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
--
2.9.4
On Thu, Mar 06, 2025 at 01:11:02PM +0800, lirongqing wrote:
> From: Li RongQing <lirongqing@baidu.com>
>
> bts_ctx maybe not allocated, for example if the cpu has X86_FEATURE_PTI,
> but intel_bts_disable/enable_local and intel_bts_interrupt are called
> unconditionally from intel_pmu_handle_irq and exploding on accessing
> bts_ctx
>
> so check if bts_ctx is allocated when call bts functions
>
> Fixes: 3acfcefa795c "(perf/x86/intel/bts: Allocate bts_ctx only if necessary)"
> Reported-by: Jiri Olsa <olsajiri@gmail.com>
Tested-by: Jiri Olsa <jolsa@kernel.org>
thanks,
jirka
> Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
> Suggested-by: Dave Hansen <dave.hansen@intel.com>
> Signed-off-by: Li RongQing <lirongqing@baidu.com>
> ---
> arch/x86/events/intel/bts.c | 25 ++++++++++++++++++++-----
> 1 file changed, 20 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
> index 8e09319..e8b3e7b 100644
> --- a/arch/x86/events/intel/bts.c
> +++ b/arch/x86/events/intel/bts.c
> @@ -338,9 +338,14 @@ static void bts_event_stop(struct perf_event *event, int flags)
>
> void intel_bts_enable_local(void)
> {
> - struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
> - int state = READ_ONCE(bts->state);
> + struct bts_ctx *bts;
> + int state;
>
> + if (!bts_ctx)
> + return;
> +
> + bts = this_cpu_ptr(bts_ctx);
> + state = READ_ONCE(bts->state);
> /*
> * Here we transition from INACTIVE to ACTIVE;
> * if we instead are STOPPED from the interrupt handler,
> @@ -358,7 +363,12 @@ void intel_bts_enable_local(void)
>
> void intel_bts_disable_local(void)
> {
> - struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
> + struct bts_ctx *bts;
> +
> + if (!bts_ctx)
> + return;
> +
> + bts = this_cpu_ptr(bts_ctx);
>
> /*
> * Here we transition from ACTIVE to INACTIVE;
> @@ -450,12 +460,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
> int intel_bts_interrupt(void)
> {
> struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
> - struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
> - struct perf_event *event = bts->handle.event;
> + struct bts_ctx *bts;
> + struct perf_event *event;
> struct bts_buffer *buf;
> s64 old_head;
> int err = -ENOSPC, handled = 0;
>
> + if (!bts_ctx)
> + return 0;
> +
> + bts = this_cpu_ptr(bts_ctx);
> + event = bts->handle.event;
> /*
> * The only surefire way of knowing if this NMI is ours is by checking
> * the write ptr against the PMI threshold.
> --
> 2.9.4
>
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 7a310c644cf571fbdb1d447a1dc39cf048634589
Gitweb: https://git.kernel.org/tip/7a310c644cf571fbdb1d447a1dc39cf048634589
Author: Li RongQing <lirongqing@baidu.com>
AuthorDate: Thu, 06 Mar 2025 13:11:02 +08:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Thu, 06 Mar 2025 22:42:26 +01:00
perf/x86/intel/bts: Check if bts_ctx is allocated when calling BTS functions
bts_ctx might not be allocated, for example if the CPU has X86_FEATURE_PTI,
but intel_bts_disable/enable_local() and intel_bts_interrupt() are called
unconditionally from intel_pmu_handle_irq() and crash on bts_ctx.
So check if bts_ctx is allocated when calling BTS functions.
Fixes: 3acfcefa795c ("perf/x86/intel/bts: Allocate bts_ctx only if necessary")
Reported-by: Jiri Olsa <olsajiri@gmail.com>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250306051102.2642-1-lirongqing@baidu.com
---
arch/x86/events/intel/bts.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 953868d..39a987d 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -338,9 +338,14 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- int state = READ_ONCE(bts->state);
+ struct bts_ctx *bts;
+ int state;
+ if (!bts_ctx)
+ return;
+
+ bts = this_cpu_ptr(bts_ctx);
+ state = READ_ONCE(bts->state);
/*
* Here we transition from INACTIVE to ACTIVE;
* if we instead are STOPPED from the interrupt handler,
@@ -358,7 +363,12 @@ void intel_bts_enable_local(void)
void intel_bts_disable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
+ struct bts_ctx *bts;
+
+ if (!bts_ctx)
+ return;
+
+ bts = this_cpu_ptr(bts_ctx);
/*
* Here we transition from ACTIVE to INACTIVE;
@@ -450,12 +460,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void)
{
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
- struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
- struct perf_event *event = bts->handle.event;
+ struct bts_ctx *bts;
+ struct perf_event *event;
struct bts_buffer *buf;
s64 old_head;
int err = -ENOSPC, handled = 0;
+ if (!bts_ctx)
+ return 0;
+
+ bts = this_cpu_ptr(bts_ctx);
+ event = bts->handle.event;
/*
* The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
© 2016 - 2026 Red Hat, Inc.