perf_cgroup_switch and perf_cgroup_event_disable may run concurrently.
Consider the following scenario: after a new perf cgroup event is
created on CPU0, the new event may not trigger a reprogramming, leaving
ctx->is_active at 0. In this case, when CPU1 disables this perf event,
it executes __perf_remove_from_context->
list_del_event->perf_cgroup_event_disable on CPU1, which races with
perf_cgroup_switch running on CPU0.
The following describes the details of this concurrency scenario:

CPU0                                            CPU1

perf_cgroup_switch:
   ...
   # cpuctx->cgrp is not NULL here
   if (READ_ONCE(cpuctx->cgrp) == NULL)
        return;

                                                perf_remove_from_context:
                                                   ...
                                                   raw_spin_lock_irq(&ctx->lock);
                                                   ...
                                                   # ctx->is_active == 0 because the reprogramming
                                                   # was not triggered, so CPU1 can do
                                                   # __perf_remove_from_context for CPU0
                                                   __perf_remove_from_context:
                                                      perf_cgroup_event_disable:
                                                         ...
                                                         if (--ctx->nr_cgroups)
                                                            ...

   # this warning will fire because CPU1 changed
   # ctx.nr_cgroups to 0.
   WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
To fix this problem, expand the lock-holding critical section in
perf_cgroup_switch.
Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
---
kernel/events/core.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 280d42b40b34..1e442897ebde 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -931,20 +931,20 @@ static void perf_cgroup_switch(struct task_struct *task)
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_cgroup *cgrp;

+	cgrp = perf_cgroup_from_task(task, NULL);
+	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	/*
 	 * cpuctx->cgrp is set when the first cgroup event enabled,
 	 * and is cleared when the last cgroup event disabled.
 	 */
 	if (READ_ONCE(cpuctx->cgrp) == NULL)
-		return;
+		goto unlock;

 	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

-	cgrp = perf_cgroup_from_task(task, NULL);
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
-		return;
+		goto unlock;

-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_ctx_disable(&cpuctx->ctx, true);

 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -962,6 +962,7 @@ static void perf_cgroup_switch(struct task_struct *task)
 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);

 	perf_ctx_enable(&cpuctx->ctx, true);
+unlock:
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
--
2.34.1
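To make the race window concrete, here is a minimal user-space model of the
interleaving described in the commit message above. It is only an
illustration: pthreads stand in for the two CPUs, a pthread mutex stands in
for ctx->lock, and the cgrp/nr_cgroups variables merely mirror the kernel
field names; none of this is kernel code.

/*
 * User-space model of the race: "CPU0" checks cgrp without the lock, while
 * "CPU1" drops the last cgroup event under the lock.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cgrp = (void *)0x1;	/* non-NULL: a cgroup event is active */
static int nr_cgroups = 1;

static void *cpu1_disable_event(void *arg)
{
	(void)arg;
	/* models __perf_remove_from_context -> perf_cgroup_event_disable */
	pthread_mutex_lock(&ctx_lock);
	if (--nr_cgroups == 0)
		cgrp = NULL;
	pthread_mutex_unlock(&ctx_lock);
	return NULL;
}

static void *cpu0_cgroup_switch(void *arg)
{
	(void)arg;
	/* models perf_cgroup_switch: the first check is done without the lock */
	if (cgrp == NULL)
		return NULL;

	/* CPU1 may run entirely inside this window */
	pthread_mutex_lock(&ctx_lock);
	/*
	 * Without re-checking cgrp under the lock, this assertion can fire --
	 * the analogue of the WARN_ON_ONCE in the kernel.
	 */
	assert(nr_cgroups != 0);
	pthread_mutex_unlock(&ctx_lock);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0_cgroup_switch, NULL);
	pthread_create(&t1, NULL, cpu1_disable_event, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	printf("done (the assert trips only if the unlocked check races)\n");
	return 0;
}

Built with "cc -pthread", the assert can only trip when CPU1 runs between
CPU0's unlocked check and its locked section, which is exactly the window
the patch is trying to close.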
On Wed, Jun 04, 2025 at 03:39:24AM +0000, Luo Gengkun wrote:
> There may be concurrency between perf_cgroup_switch and
> perf_cgroup_event_disable. Consider the following scenario: after a new
> perf cgroup event is created on CPU0, the new event may not trigger
> a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1
> disables this perf event, it executes __perf_remove_from_context->
> list _del_event->perf_cgroup_event_disable on CPU1, which causes a race
> with perf_cgroup_switch running on CPU0.
>
> The following describes the details of this concurrency scenario:
>
> CPU0 CPU1
>
> perf_cgroup_switch:
> ...
> # cpuctx->cgrp is not NULL here
> if (READ_ONCE(cpuctx->cgrp) == NULL)
> return;
>
> perf_remove_from_context:
> ...
> raw_spin_lock_irq(&ctx->lock);
> ...
> # ctx->is_active == 0 because reprogramm is not
> # tigger, so CPU1 can do __perf_remove_from_context
> # for CPU0
> __perf_remove_from_context:
> perf_cgroup_event_disable:
> ...
> if (--ctx->nr_cgroups)
> ...
>
> # this warning will happened because CPU1 changed
> # ctx.nr_cgroups to 0.
> WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
>
> To fix this problem, expand the lock-holding critical section in
> perf_cgroup_switch.
>
> Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
> Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
> ---
Right, so how about we simply re-check the condition once we take the
lock?
Also, take the opportunity to convert to guard instead of adding goto
unlock.
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_
 	__perf_ctx_unlock(&cpuctx->ctx);
 }

+typedef struct {
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+} class_perf_ctx_lock_t;
+
+static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
+{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
+
+static inline class_perf_ctx_lock_t
+class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx)
+{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
+
 #define TASK_TOMBSTONE ((void *)-1L)

 static bool is_kernel_event(struct perf_event *event)
@@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct ta
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;

-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+	/*
+	 * Re-check, could've raced vs perf_remove_from_context().
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
+
 	perf_ctx_disable(&cpuctx->ctx, true);

 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct ta
 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);

 	perf_ctx_enable(&cpuctx->ctx, true);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }

 static int perf_cgroup_ensure_storage(struct perf_event *event,
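As context for the conversion above: guard(perf_ctx_lock)(...) works because
the kernel's guard()/CLASS machinery (include/linux/cleanup.h) wires the
class_*_constructor/class_*_destructor pair, roughly speaking, to the
compiler's cleanup attribute, so the unlock runs automatically on every
return path. A standalone sketch of that mechanism, deliberately using plain
pthreads rather than the kernel macros:

#include <pthread.h>
#include <stdio.h>

/* guard object: holds the lock it is responsible for releasing */
typedef struct { pthread_mutex_t *m; } mutex_guard_t;

static inline mutex_guard_t mutex_guard_ctor(pthread_mutex_t *m)
{ pthread_mutex_lock(m); return (mutex_guard_t){ m }; }

static inline void mutex_guard_dtor(mutex_guard_t *g)
{ pthread_mutex_unlock(g->m); }

/* the destructor runs when the guard variable goes out of scope */
#define guard_mutex(m) \
	mutex_guard_t __g __attribute__((cleanup(mutex_guard_dtor))) = mutex_guard_ctor(m)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static int try_increment(int limit)
{
	guard_mutex(&lock);		/* unlocked automatically on both returns */
	if (counter >= limit)
		return -1;
	counter++;
	return 0;
}

int main(void)
{
	printf("%d %d\n", try_increment(1), try_increment(1));	/* prints: 0 -1 */
	return 0;
}

That early-return safety is what lets the re-check added above simply
return without leaking the lock.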
On 2025/6/4 18:00, Peter Zijlstra wrote:
> On Wed, Jun 04, 2025 at 03:39:24AM +0000, Luo Gengkun wrote:
>> There may be concurrency between perf_cgroup_switch and
>> perf_cgroup_event_disable. Consider the following scenario: after a new
>> perf cgroup event is created on CPU0, the new event may not trigger
>> a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1
>> disables this perf event, it executes __perf_remove_from_context->
>> list _del_event->perf_cgroup_event_disable on CPU1, which causes a race
>> with perf_cgroup_switch running on CPU0.
>>
>> The following describes the details of this concurrency scenario:
>>
>> CPU0 CPU1
>>
>> perf_cgroup_switch:
>> ...
>> # cpuctx->cgrp is not NULL here
>> if (READ_ONCE(cpuctx->cgrp) == NULL)
>> return;
>>
>> perf_remove_from_context:
>> ...
>> raw_spin_lock_irq(&ctx->lock);
>> ...
>> # ctx->is_active == 0 because reprogramm is not
>> # tigger, so CPU1 can do __perf_remove_from_context
>> # for CPU0
>> __perf_remove_from_context:
>> perf_cgroup_event_disable:
>> ...
>> if (--ctx->nr_cgroups)
>> ...
>>
>> # this warning will happened because CPU1 changed
>> # ctx.nr_cgroups to 0.
>> WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
>>
>> To fix this problem, expand the lock-holding critical section in
>> perf_cgroup_switch.
>>
>> Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
>> Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
>> ---
> Right, so how about we simply re-check the condition once we take the
> lock?
>
> Also, take the opportunity to convert to guard instead of adding goto
> unlock.
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_
> __perf_ctx_unlock(&cpuctx->ctx);
> }
>
> +typedef struct {
> + struct perf_cpu_context *cpuctx;
> + struct perf_event_context *ctx;
> +} class_perf_ctx_lock_t;
> +
> +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> +{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
> +
> +static inline class_perf_ctx_lock_t
> +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
> + struct perf_event_context *ctx)
> +{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
> +
> #define TASK_TOMBSTONE ((void *)-1L)
>
> static bool is_kernel_event(struct perf_event *event)
> @@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct ta
> if (READ_ONCE(cpuctx->cgrp) == cgrp)
> return;
>
> - perf_ctx_lock(cpuctx, cpuctx->task_ctx);
> + guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
> + /*
> + * Re-check, could've raced vs perf_remove_from_context().
> + */
> + if (READ_ONCE(cpuctx->cgrp) == NULL)
> + return;
> +
> perf_ctx_disable(&cpuctx->ctx, true);
>
> ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
> @@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct ta
> ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
>
> perf_ctx_enable(&cpuctx->ctx, true);
> - perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
> }
>
> static int perf_cgroup_ensure_storage(struct perf_event *event,
Thanks for your review, I will make changes based on your suggestions.
On Thu, Jun 05, 2025 at 11:55:03AM +0800, Luo Gengkun wrote:
>
> On 2025/6/4 18:00, Peter Zijlstra wrote:
> > On Wed, Jun 04, 2025 at 03:39:24AM +0000, Luo Gengkun wrote:
> > > There may be concurrency between perf_cgroup_switch and
> > > perf_cgroup_event_disable. Consider the following scenario: after a new
> > > perf cgroup event is created on CPU0, the new event may not trigger
> > > a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1
> > > disables this perf event, it executes __perf_remove_from_context->
> > > list _del_event->perf_cgroup_event_disable on CPU1, which causes a race
> > > with perf_cgroup_switch running on CPU0.
> > >
> > > The following describes the details of this concurrency scenario:
> > >
> > > CPU0 CPU1
> > >
> > > perf_cgroup_switch:
> > > ...
> > > # cpuctx->cgrp is not NULL here
> > > if (READ_ONCE(cpuctx->cgrp) == NULL)
> > > return;
> > >
> > > perf_remove_from_context:
> > > ...
> > > raw_spin_lock_irq(&ctx->lock);
> > > ...
> > > # ctx->is_active == 0 because reprogramm is not
> > > # tigger, so CPU1 can do __perf_remove_from_context
> > > # for CPU0
> > > __perf_remove_from_context:
> > > perf_cgroup_event_disable:
> > > ...
> > > if (--ctx->nr_cgroups)
> > > ...
> > >
> > > # this warning will happened because CPU1 changed
> > > # ctx.nr_cgroups to 0.
> > > WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
> > >
> > > To fix this problem, expand the lock-holding critical section in
> > > perf_cgroup_switch.
> > >
> > > Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
> > > Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
> > > ---
> > Right, so how about we simply re-check the condition once we take the
> > lock?
> >
> > Also, take the opportunity to convert to guard instead of adding goto
> > unlock.
> >
> > --- a/kernel/events/core.c
> > +++ b/kernel/events/core.c
> > @@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_
> > __perf_ctx_unlock(&cpuctx->ctx);
> > }
> > +typedef struct {
> > + struct perf_cpu_context *cpuctx;
> > + struct perf_event_context *ctx;
> > +} class_perf_ctx_lock_t;
> > +
> > +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> > +{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
> > +
> > +static inline class_perf_ctx_lock_t
> > +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
> > + struct perf_event_context *ctx)
> > +{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
> > +
> > #define TASK_TOMBSTONE ((void *)-1L)
> > static bool is_kernel_event(struct perf_event *event)
> > @@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct ta
> > if (READ_ONCE(cpuctx->cgrp) == cgrp)
> > return;
> > - perf_ctx_lock(cpuctx, cpuctx->task_ctx);
> > + guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
> > + /*
> > + * Re-check, could've raced vs perf_remove_from_context().
> > + */
> > + if (READ_ONCE(cpuctx->cgrp) == NULL)
> > + return;
> > +
> > perf_ctx_disable(&cpuctx->ctx, true);
> > ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
> > @@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct ta
> > ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
> > perf_ctx_enable(&cpuctx->ctx, true);
> > - perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
> > }
> > static int perf_cgroup_ensure_storage(struct perf_event *event,
>
> Thank for your review, I will make changes based on your suggestions.
>
No need to resend. I've got your patch with modifications. But please
confirm it does work :-)
The following commit has been merged into the perf/urgent branch of tip:
Commit-ID: 3172fb986666dfb71bf483b6d3539e1e587fa197
Gitweb: https://git.kernel.org/tip/3172fb986666dfb71bf483b6d3539e1e587fa197
Author: Luo Gengkun <luogengkun@huaweicloud.com>
AuthorDate: Wed, 04 Jun 2025 03:39:24
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 05 Jun 2025 14:37:52 +02:00
perf/core: Fix WARN in perf_cgroup_switch()
perf_cgroup_switch and perf_cgroup_event_disable may run concurrently.
Consider the following scenario: after a new perf cgroup event is
created on CPU0, the new event may not trigger a reprogramming, leaving
ctx->is_active at 0. In this case, when CPU1 disables this perf event,
it executes __perf_remove_from_context->
list_del_event->perf_cgroup_event_disable on CPU1, which races with
perf_cgroup_switch running on CPU0.
The following describes the details of this concurrency scenario:

CPU0                                            CPU1

perf_cgroup_switch:
   ...
   # cpuctx->cgrp is not NULL here
   if (READ_ONCE(cpuctx->cgrp) == NULL)
        return;

                                                perf_remove_from_context:
                                                   ...
                                                   raw_spin_lock_irq(&ctx->lock);
                                                   ...
                                                   # ctx->is_active == 0 because the reprogramming
                                                   # was not triggered, so CPU1 can do
                                                   # __perf_remove_from_context for CPU0
                                                   __perf_remove_from_context:
                                                      perf_cgroup_event_disable:
                                                         ...
                                                         if (--ctx->nr_cgroups)
                                                            ...

   # this warning will fire because CPU1 changed
   # ctx.nr_cgroups to 0.
   WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
[peterz: use guard instead of goto unlock]
Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250604033924.3914647-3-luogengkun@huaweicloud.com
---
kernel/events/core.c | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d786083..d7cf008 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
 	__perf_ctx_unlock(&cpuctx->ctx);
 }

+typedef struct {
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+} class_perf_ctx_lock_t;
+
+static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
+{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
+
+static inline class_perf_ctx_lock_t
+class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx)
+{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
+
 #define TASK_TOMBSTONE ((void *)-1L)

 static bool is_kernel_event(struct perf_event *event)
@@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct task_struct *task)
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;

-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+	/*
+	 * Re-check, could've raced vs perf_remove_from_context().
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
+
 	perf_ctx_disable(&cpuctx->ctx, true);

 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct task_struct *task)
 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);

 	perf_ctx_enable(&cpuctx->ctx, true);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }

 static int perf_cgroup_ensure_storage(struct perf_event *event,
On 2025/6/11 17:29, tip-bot2 for Luo Gengkun wrote:
> The following commit has been merged into the perf/urgent branch of tip:
>
> Commit-ID: 3172fb986666dfb71bf483b6d3539e1e587fa197
> Gitweb: https://git.kernel.org/tip/3172fb986666dfb71bf483b6d3539e1e587fa197
> Author: Luo Gengkun <luogengkun@huaweicloud.com>
> AuthorDate: Wed, 04 Jun 2025 03:39:24
> Committer: Peter Zijlstra <peterz@infradead.org>
> CommitterDate: Thu, 05 Jun 2025 14:37:52 +02:00
>
> perf/core: Fix WARN in perf_cgroup_switch()
>
> There may be concurrency between perf_cgroup_switch and
> perf_cgroup_event_disable. Consider the following scenario: after a new
> perf cgroup event is created on CPU0, the new event may not trigger
> a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1
> disables this perf event, it executes __perf_remove_from_context->
> list _del_event->perf_cgroup_event_disable on CPU1, which causes a race
> with perf_cgroup_switch running on CPU0.
>
> The following describes the details of this concurrency scenario:
>
> CPU0 CPU1
>
> perf_cgroup_switch:
> ...
> # cpuctx->cgrp is not NULL here
> if (READ_ONCE(cpuctx->cgrp) == NULL)
> return;
>
> perf_remove_from_context:
> ...
> raw_spin_lock_irq(&ctx->lock);
> ...
> # ctx->is_active == 0 because reprogramm is not
> # tigger, so CPU1 can do __perf_remove_from_context
> # for CPU0
> __perf_remove_from_context:
> perf_cgroup_event_disable:
> ...
> if (--ctx->nr_cgroups)
> ...
>
> # this warning will happened because CPU1 changed
> # ctx.nr_cgroups to 0.
> WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
>
> [peterz: use guard instead of goto unlock]
> Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
> Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Link: https://lkml.kernel.org/r/20250604033924.3914647-3-luogengkun@huaweicloud.com
Sorry for the late reply. I found that the Link tag points to v2 instead of v3.
The v3 link is:

https://lore.kernel.org/all/20250609035316.250557-1-luogengkun@huaweicloud.com/

v2 attempts to fix a concurrency problem between perf_cgroup_switch
and perf_cgroup_event_disable, but it does not move the WARN_ON_ONCE
into the lock-protected region, so the warning is still triggered.

The following patch has been tested and fixes this issue.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1f746469fda5..a35784d42c66 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -951,8 +951,6 @@ static void perf_cgroup_switch(struct task_struct *task)
 	if (READ_ONCE(cpuctx->cgrp) == NULL)
 		return;

-	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
-
 	cgrp = perf_cgroup_from_task(task, NULL);
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
@@ -964,6 +962,8 @@ static void perf_cgroup_switch(struct task_struct *task)
 	if (READ_ONCE(cpuctx->cgrp) == NULL)
 		return;

+	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
 	perf_ctx_disable(&cpuctx->ctx, true);

 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
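For clarity, combining this follow-up with the guard-based re-check already in
tip, perf_cgroup_switch() would end up shaped roughly as below. This is a
paraphrase assembled from the two diffs in this thread, not the literal kernel
source, and it elides the actual cgroup reprogramming in the middle:

static void perf_cgroup_switch(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
	struct perf_cgroup *cgrp;

	if (READ_ONCE(cpuctx->cgrp) == NULL)		/* cheap unlocked check */
		return;

	cgrp = perf_cgroup_from_task(task, NULL);
	if (READ_ONCE(cpuctx->cgrp) == cgrp)		/* nothing to switch */
		return;

	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);

	/* Re-check: could have raced with perf_remove_from_context(). */
	if (READ_ONCE(cpuctx->cgrp) == NULL)
		return;

	/* Only under the lock is nr_cgroups stable, so the sanity check lives here. */
	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

	perf_ctx_disable(&cpuctx->ctx, true);
	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
	/* ... update cpuctx->cgrp and reschedule cgroup events ... */
	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
	perf_ctx_enable(&cpuctx->ctx, true);
}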
> ---
> kernel/events/core.c | 22 ++++++++++++++++++++--
> 1 file changed, 20 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index d786083..d7cf008 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
> __perf_ctx_unlock(&cpuctx->ctx);
> }
>
> +typedef struct {
> + struct perf_cpu_context *cpuctx;
> + struct perf_event_context *ctx;
> +} class_perf_ctx_lock_t;
> +
> +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> +{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
> +
> +static inline class_perf_ctx_lock_t
> +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
> + struct perf_event_context *ctx)
> +{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
> +
> #define TASK_TOMBSTONE ((void *)-1L)
>
> static bool is_kernel_event(struct perf_event *event)
> @@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct task_struct *task)
> if (READ_ONCE(cpuctx->cgrp) == cgrp)
> return;
>
> - perf_ctx_lock(cpuctx, cpuctx->task_ctx);
> + guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
> + /*
> + * Re-check, could've raced vs perf_remove_from_context().
> + */
> + if (READ_ONCE(cpuctx->cgrp) == NULL)
> + return;
> +
> perf_ctx_disable(&cpuctx->ctx, true);
>
> ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
> @@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct task_struct *task)
> ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
>
> perf_ctx_enable(&cpuctx->ctx, true);
> - perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
> }
>
> static int perf_cgroup_ensure_storage(struct perf_event *event,
On Thu, Jun 26, 2025 at 09:08:38PM +0800, Luo Gengkun wrote:
> Sorry for the late reply. I found that the Link tag points to v2 instead of v3.
> The v3 link is:
>
> https://lore.kernel.org/all/20250609035316.250557-1-luogengkun@huaweicloud.com/
>
> v2 attempts to fix a concurrency problem between perf_cgroup_switch
> and perf_cgroup_event_disable, but it does not move the WARN_ON_ONCE
> into the lock-protected region, so the warning is still triggered.
>
> The following patch has been tested and fixes this issue.
>
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 1f746469fda5..a35784d42c66 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -951,8 +951,6 @@ static void perf_cgroup_switch(struct task_struct *task)
>  	if (READ_ONCE(cpuctx->cgrp) == NULL)
>  		return;
>
> -	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
> -
>  	cgrp = perf_cgroup_from_task(task, NULL);
>  	if (READ_ONCE(cpuctx->cgrp) == cgrp)
>  		return;
> @@ -964,6 +962,8 @@ static void perf_cgroup_switch(struct task_struct *task)
>  	if (READ_ONCE(cpuctx->cgrp) == NULL)
>  		return;
>
> +	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
> +
>  	perf_ctx_disable(&cpuctx->ctx, true);
>
>  	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);

Can you send as a full new patch, the thing is already in Linus' tree.