When using mutex_acquire_nest() with a nest_lock, lockdep refcounts the
number of acquired lockdep_maps of mutexes of the same class, and also
keeps a pointer to the first acquired lockdep_map of a class. That pointer
is then used for various comparison-, printing- and checking purposes,
but there is no mechanism to actively ensure that the lockdep_map stays
in memory. Instead, a warning is printed if the lockdep_map is freed
while there are still held locks of the same lock class, even if the
lock itself has already been released.
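For reference, the merging logic in question (excerpted later in this
thread from lockdep's __lock_acquire(); the comment is added here for
illustration) looks like this:

	hlock = curr->held_locks + depth - 1;
	if (hlock->class_idx == class_idx && nest_lock) {
		/*
		 * Same class acquired under a nest_lock: bump the
		 * reference count on the existing held_lock entry
		 * instead of pushing a new one. That entry keeps
		 * pointing at the dep_map of the first acquired lock
		 * of the class.
		 */
		...
	}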
In the context of WW/WD transactions that means that if a user unlocks
and frees a ww_mutex from within an ongoing ww transaction, and that
mutex happens to be the first ww_mutex grabbed in the transaction,
such a warning is printed and there might be a risk of a UAF.
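As an illustration, a hypothetical driver-style sequence that triggers
this (all names are made up for the example):

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &obj_ww_class);
	ww_mutex_lock(&a->lock, &ctx);	/* first lock of the class: lockdep's
					 * held_lock entry points at a->lock's
					 * dep_map */
	ww_mutex_lock(&b->lock, &ctx);	/* merged into that entry via nest_lock */
	ww_mutex_unlock(&a->lock);
	kfree(a);			/* frees a->lock's lockdep_map while
					 * b->lock is still held: lockdep
					 * warning, potential UAF */
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);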
Note that this is only a problem when lockdep is enabled, and it affects
only dereferences of struct lockdep_map.
Adjust to this by adding a fake lockdep_map to the acquire context and
making sure it is the first acquired lockdep map of the associated
ww_mutex class. Then hold it for the duration of the WW/WD transaction.
This has the side effect that trying to lock a ww_mutex *without* a
ww_acquire_context, but where such a context has been acquired, now
produces a lockdep splat. The test-ww_mutex.c selftest attempts to do
that, so modify that particular test to not acquire a ww_acquire_context
if it is not going to be used.
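A minimal sketch of the pattern that now splats (again with hypothetical
names):

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &obj_ww_class);	/* the fake first lock of the
						 * class is now held */
	ww_mutex_lock(&m->lock, NULL);		/* same class acquired without a
						 * ctx, i.e. without nest_lock:
						 * lockdep splat */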
v2:
- Lower the number of locks in the test-ww_mutex
stress(STRESS_ALL) test to accommodate the dummy lock
introduced in this patch without overflowing lockdep held lock
references.
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Maarten Lankhorst <maarten@lankhorst.se>
Cc: Christian König <christian.koenig@amd.com>
Cc: dri-devel@lists.freedesktop.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
include/linux/ww_mutex.h | 14 ++++++++++++++
kernel/locking/test-ww_mutex.c | 8 +++++---
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index bb763085479a..a401a2f31a77 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
+ /**
+ * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
+ *
+ * lockdep requires the lockdep_map for the first locked ww_mutex
+ * in a ww transaction to remain in memory until all ww_mutexes of
+ * the transaction have been unlocked. Ensure this by keeping a
+ * fake locked ww_mutex lockdep map between ww_acquire_init() and
+ * ww_acquire_fini().
+ */
+ struct lockdep_map first_lock_dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
+ lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
+ &ww_class->mutex_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+ mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 10a5736a21c2..5d58b2c0ef98 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -62,7 +62,8 @@ static int __test_mutex(unsigned int flags)
int ret;
ww_mutex_init(&mtx.mutex, &ww_class);
- ww_acquire_init(&ctx, &ww_class);
+ if (flags & TEST_MTX_CTX)
+ ww_acquire_init(&ctx, &ww_class);
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
init_completion(&mtx.ready);
@@ -90,7 +91,8 @@ static int __test_mutex(unsigned int flags)
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
}
ww_mutex_unlock(&mtx.mutex);
- ww_acquire_fini(&ctx);
+ if (flags & TEST_MTX_CTX)
+ ww_acquire_fini(&ctx);
if (ret) {
pr_err("%s(flags=%x): mutual exclusion failure\n",
@@ -679,7 +681,7 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
- ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
+ ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
if (ret)
return ret;
--
2.46.0
Hi Thomas,

On Wed, Oct 09, 2024 at 11:20:31AM +0200, Thomas Hellström wrote:
[...]

Have you tested your patch with lib/locking-selftest.c? It reported two
errors for me:

[..]                               | Wound/wait tests |
[..]                               ---------------------
[..]                  ww api failures:  ok  |FAILED|  ok  |
[..]               ww contexts mixing:  ok  |  ok  |
[..]             finishing ww context:  ok  |  ok  |  ok  |  ok  |
[..]               locking mismatches:  ok  |  ok  |  ok  |
[..]                 EDEADLK handling:  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |
[..]           spinlock nest unlocked:  ok  |
[..]               spinlock nest test:  ok  |
[..]   -----------------------------------------------------
[..]                                  |block | try  |context|
[..]   -----------------------------------------------------
[..]                          context:  ok  |  ok  |  ok  |
[..]                              try:  ok  |  ok  |  ok  |
[..]                            block:  ok  |  ok  |  ok  |
[..]                         spinlock:  ok  |  ok  |FAILED|

The first one is a use case issue, I think, and can be fixed similarly
to your changes in test-ww_mutex.c:

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f6a5fc85b42..6750321e3e9a 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1720,8 +1720,6 @@ static void ww_test_normal(void)
 {
 	int ret;
 
-	WWAI(&t);
-
 	/*
 	 * None of the ww_mutex codepaths should be taken in the 'normal'
 	 * mutex calls. The easiest way to verify this is by using the
@@ -1770,6 +1768,8 @@ static void ww_test_normal(void)
 	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
+	WWAI(&t);
+
 	/* nest_lock */
 	o.ctx = (void *)~0UL;
 	ww_mutex_base_lock_nest_lock(&o.base, &t);

Please confirm whether this change is intended.

The second is a case as follows:

	ww_acquire_init(...);
	spin_lock(...);
	ww_mutex_lock(...);	// this should trigger a context
				// invalidation. But the mutex was
				// initialized by ww_acquire_init() as a
				// LD_WAIT_INV lock.
The following could fix this:

diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index a401a2f31a77..45ff6f7a872b 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -156,8 +156,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
 	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
 			 &ww_class->acquire_key, 0);
-	lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
-			 &ww_class->mutex_key, 0);
+	lockdep_init_map_wait(&ctx->first_lock_dep_map, ww_class->mutex_name,
+			      &ww_class->mutex_key, 0, LD_WAIT_SLEEP);
 	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
 	mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
 #endif

A v3 with all these fixed would look good to me, and I can add a
Tested-by tag to it. Thanks!

Regards,
Boqun
On Mon, 2024-10-14 at 13:23 -0700, Boqun Feng wrote:
[...]
> The first one is a use case issue, I think, and can be fixed similarly
> to your changes in test-ww_mutex.c:
>
[...]
>
> Please confirm whether this change is intended.

This fix looks correct, and while this change was not intended, I think
it makes sense, and if this locking order is present in existing code
apart from this selftest, it's probably easily fixable.

> The second is a case as follows:
>
> 	ww_acquire_init(...);
> 	spin_lock(...);
> 	ww_mutex_lock(...);	// this should trigger a context
> 				// invalidation. But the mutex was
> 				// initialized by ww_acquire_init() as a
> 				// LD_WAIT_INV lock.
>
> The following could fix this:
>
[...]
>
> A v3 with all these fixed would look good to me, and I can add a
> Tested-by tag to it. Thanks!

The fix here is a bit confusing. It looks like this test is crafted to
fail because we take a sleeping ww_mutex inside a spinlock. But the
ww_mutex lockdep map is already initialized as LD_WAIT_SLEEP. How come
the first_lock_dep_map locking mode LD_WAIT_INV is used in the
ww_mutex_lock()? Is that because of the lockdep hlock refcounting?

Thanks,
Thomas
On Tue, Oct 15, 2024 at 05:27:28PM +0200, Thomas Hellström wrote:
[..]
> The fix here is a bit confusing. It looks like this test is crafted to
> fail because we take a sleeping ww_mutex inside a spinlock. But the
> ww_mutex lockdep map is already initialized as LD_WAIT_SLEEP. How come
> the first_lock_dep_map locking mode LD_WAIT_INV is used in the
> ww_mutex_lock()? Is that because of the lockdep hlock refcounting?
>

No, it's not because of refcounting; actually, in this case refcounting
won't happen, because there is a spin_lock sitting in between:

	held_locks stack:

	ww_lockdep_acquire
	ww_lockdep_mutex
	lock_A

Because there is a lock_A here, the following "if" will be false for
ww_mutex_lock() in the test case:

	hlock = curr->held_locks + depth - 1;
	if (hlock->class_idx == class_idx && nest_lock) {

The reason why the wait types of 'first_lock_dep_map' matter is that
the lock class it shares with ww_mutex_lock() is registered at
*acquire* time. So because we do

	ww_acquire_init():
	  ...
	  lockdep_init_map(...);
	  ...
	  mutex_acquire_nest(...);
	  ...
	ww_mutex_lock():
	  __mutex_lock_common():
	    mutex_acquire_nest(...);

in the test case, these two mutex_acquire_nest()s use different
lockdep_maps but share the same key; therefore, whoever calls
mutex_acquire_nest() first registers the lock class with its wait types.

So even though first_lock_dep_map is a fake lock, it has to have the
same wait types as a real mutex.

Does this make sense?

Regards,
Boqun
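To make that concrete, a minimal sketch of the selftest-style sequence,
assuming CONFIG_PROVE_RAW_LOCK_NESTING is enabled so that wait types are
checked (names are hypothetical):

	static DEFINE_WW_CLASS(demo_ww_class);
	static DEFINE_SPINLOCK(demo_spinlock);
	static struct ww_mutex demo_mutex;
	struct ww_acquire_ctx ctx;

	ww_mutex_init(&demo_mutex, &demo_ww_class);
	ww_acquire_init(&ctx, &demo_ww_class);	/* first acquire of mutex_key:
						 * registers the class, wait
						 * types included */
	spin_lock(&demo_spinlock);
	ww_mutex_lock(&demo_mutex, &ctx);	/* sleeping lock inside a
						 * spinlock: only flagged if the
						 * class was registered as
						 * LD_WAIT_SLEEP */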
On Tue, 2024-10-15 at 11:00 -0700, Boqun Feng wrote:
[..]
> in the test case, these two mutex_acquire_nest()s use different
> lockdep_maps but share the same key; therefore, whoever calls
> mutex_acquire_nest() first registers the lock class with its wait types.
>
> So even though first_lock_dep_map is a fake lock, it has to have the
> same wait types as a real mutex.

Understood.

> Does this make sense?

Yes it does. I'll update to a v3, and add a Tested-by: tag. Would you
like a Co-developed-by: tag as well?

Thanks,
Thomas
On Wed, Oct 16, 2024 at 08:17:50AM +0200, Thomas Hellström wrote:
[...]
> > Does this make sense?
>
> Yes it does. I'll update to a v3, and add a Tested-by: tag. Would you

I think you should send a v3; then, after I test, I will send my
Tested-by.

> like a Co-developed-by: tag as well?

It's not a big change onto yours, but feel free to add it.

Regards,
Boqun
On Wed, 2024-10-16 at 10:12 -0700, Boqun Feng wrote:
[...]
> > like a Co-developed-by: tag as well?
>
> It's not a big change onto yours, but feel free to add it.

It would probably have taken me considerable time to find that missing
LD_WAIT_SLEEP, but I forgot to ask for your S-o-b, so I sent the patch
without it. Perhaps it can be added at commit time if needed.

Thanks again,
Thomas
Hi!

On Mon, 2024-10-14 at 13:23 -0700, Boqun Feng wrote:
[...]
> Have you tested your patch with lib/locking-selftest.c? It reported
> two errors for me:

Let me take a look at these. Thanks for the report.

/Thomas
On Wed, Oct 09, 2024 at 11:20:31AM +0200, Thomas Hellström wrote:
[...]

Thanks, I rebased tip/locking/core, which should now have this patch.
On Wed, 2024-10-09 at 15:10 +0200, Peter Zijlstra wrote:
[...]
> Thanks, I rebased tip/locking/core, which should now have this patch.

Thanks. It takes some time for that failing CI test to run, though, and
since I can't repro the failure locally I'll keep a watch out.

/Thomas
The following commit has been merged into the locking/core branch of tip:
Commit-ID: 823a566221a5639f6c69424897218e5d6431a970
Gitweb: https://git.kernel.org/tip/823a566221a5639f6c69424897218e5d6431a970
Author: Thomas Hellström <thomas.hellstrom@linux.intel.com>
AuthorDate: Wed, 09 Oct 2024 11:20:31 +02:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 09 Oct 2024 15:08:25 +02:00
locking/ww_mutex: Adjust to lockdep nest_lock requirements
When using mutex_acquire_nest() with a nest_lock, lockdep refcounts the
number of acquired lockdep_maps of mutexes of the same class, and also
keeps a pointer to the first acquired lockdep_map of a class. That pointer
is then used for various comparison-, printing- and checking purposes,
but there is no mechanism to actively ensure that the lockdep_map stays
in memory. Instead, a warning is printed if the lockdep_map is freed
while there are still held locks of the same lock class, even if the
lock itself has already been released.
In the context of WW/WD transactions that means that if a user unlocks
and frees a ww_mutex from within an ongoing ww transaction, and that
mutex happens to be the first ww_mutex grabbed in the transaction,
such a warning is printed and there might be a risk of a UAF.
Note that this is only a problem when lockdep is enabled, and it affects
only dereferences of struct lockdep_map.
Adjust to this by adding a fake lockdep_map to the acquire context and
making sure it is the first acquired lockdep map of the associated
ww_mutex class. Then hold it for the duration of the WW/WD transaction.
This has the side effect that trying to lock a ww_mutex *without* a
ww_acquire_context, but where such a context has been acquired, now
produces a lockdep splat. The test-ww_mutex.c selftest attempts to do
that, so modify that particular test to not acquire a ww_acquire_context
if it is not going to be used.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20241009092031.6356-1-thomas.hellstrom@linux.intel.com
---
include/linux/ww_mutex.h | 14 ++++++++++++++
kernel/locking/test-ww_mutex.c | 8 +++++---
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index bb76308..a401a2f 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
+ /**
+ * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
+ *
+ * lockdep requires the lockdep_map for the first locked ww_mutex
+ * in a ww transaction to remain in memory until all ww_mutexes of
+ * the transaction have been unlocked. Ensure this by keeping a
+ * fake locked ww_mutex lockdep map between ww_acquire_init() and
+ * ww_acquire_fini().
+ */
+ struct lockdep_map first_lock_dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
+ lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
+ &ww_class->mutex_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+ mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 10a5736..5d58b2c 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -62,7 +62,8 @@ static int __test_mutex(unsigned int flags)
int ret;
ww_mutex_init(&mtx.mutex, &ww_class);
- ww_acquire_init(&ctx, &ww_class);
+ if (flags & TEST_MTX_CTX)
+ ww_acquire_init(&ctx, &ww_class);
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
init_completion(&mtx.ready);
@@ -90,7 +91,8 @@ static int __test_mutex(unsigned int flags)
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
}
ww_mutex_unlock(&mtx.mutex);
- ww_acquire_fini(&ctx);
+ if (flags & TEST_MTX_CTX)
+ ww_acquire_fini(&ctx);
if (ret) {
pr_err("%s(flags=%x): mutual exclusion failure\n",
@@ -679,7 +681,7 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
- ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
+ ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
if (ret)
return ret;