mm/damon/sysfs.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-)
state_show() reads kdamond->damon_ctx without holding damon_sysfs_lock.
This allows a use-after-free race:
CPU 0                                         CPU 1
-----                                         -----
state_show()                                  damon_sysfs_turn_damon_on()
ctx = kdamond->damon_ctx;                     mutex_lock(&damon_sysfs_lock);
                                              damon_destroy_ctx(kdamond->damon_ctx);
                                              kdamond->damon_ctx = NULL;
                                              mutex_unlock(&damon_sysfs_lock);
damon_is_running(ctx); /* ctx is freed */
mutex_lock(&ctx->kdamond_lock); /* UAF */
(The race can also occur with damon_sysfs_kdamonds_rm_dirs() and
damon_sysfs_kdamond_release(), which free or replace the context under
damon_sysfs_lock.)
Fix by taking damon_sysfs_lock before dereferencing the context, mirroring
the locking used in pid_show().  Use mutex_trylock() and return -EBUSY if
the lock is contended, so a sysfs read cannot block behind a long-running
DAMON operation.
The bug has existed since state_show() first accessed kdamond->damon_ctx.
Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
Reported-by: Stanislav Fort <disclosure@aisle.com>
Closes: N/A # non-publicly reported
Signed-off-by: Stanislav Fort <disclosure@aisle.com>
---
mm/damon/sysfs.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 6d2b0dab50cb..7b9254cadd5f 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1260,14 +1260,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
struct damon_sysfs_kdamond, kobj);
- struct damon_ctx *ctx = kdamond->damon_ctx;
- bool running;
+ struct damon_ctx *ctx;
+ bool running = false;
- if (!ctx)
- running = false;
- else
+ if (!mutex_trylock(&damon_sysfs_lock))
+ return -EBUSY;
+
+ ctx = kdamond->damon_ctx;
+ if (ctx)
running = damon_is_running(ctx);
+ mutex_unlock(&damon_sysfs_lock);
+
return sysfs_emit(buf, "%s\n", running ?
damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
--
2.39.3 (Apple Git-146)
+ Andrew

On Fri, 5 Sep 2025 13:10:46 +0300 Stanislav Fort <stanislav.fort@aisle.com> wrote:

> state_show() reads kdamond->damon_ctx without holding damon_sysfs_lock.
> This allows a use-after-free race:
>
> CPU 0                                         CPU 1
> -----                                         -----
> state_show()                                  damon_sysfs_turn_damon_on()
> ctx = kdamond->damon_ctx;                     mutex_lock(&damon_sysfs_lock);
>                                               damon_destroy_ctx(kdamond->damon_ctx);
>                                               kdamond->damon_ctx = NULL;
>                                               mutex_unlock(&damon_sysfs_lock);
> damon_is_running(ctx); /* ctx is freed */
> mutex_lock(&ctx->kdamond_lock); /* UAF */
>
> (The race can also occur with damon_sysfs_kdamonds_rm_dirs() and
> damon_sysfs_kdamond_release(), which free or replace the context under
> damon_sysfs_lock.)
>
> Fix by taking damon_sysfs_lock before dereferencing the context,
> mirroring the locking used in pid_show().
>
> The bug has existed since state_show() first accessed kdamond->damon_ctx.
>
> Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
> Reported-by: Stanislav Fort <disclosure@aisle.com>
> Closes: N/A # non-publicly reported
> Signed-off-by: Stanislav Fort <disclosure@aisle.com>

Reviewed-by: SeongJae Park <sj@kernel.org>

Thanks,
SJ

[...]
© 2016 - 2025 Red Hat, Inc.