Since we've exposed Lock::from_raw() and Guard::new() publicly, we want
to assert that a lock is actually held when constructing a Guard for it,
so that unsafe Guard::new() calls outside of our lock module are caught.

So, let's do this by adding a new method, assert_is_held(), to Backend,
which uses lockdep to check whether a lock has been acquired. When
lockdep is disabled, this has no overhead.
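For illustration, here is a standalone sketch of the pattern in plain
userspace Rust (not the kernel API; a toy backend with an AtomicBool
stands in for the real lock state and for lockdep):

use std::marker::PhantomData;
use std::sync::atomic::{AtomicBool, Ordering};

// Mirrors the shape of the kernel Backend trait, including the new
// assert_is_held() method added by this patch.
trait Backend {
    type State;
    unsafe fn lock(ptr: *mut Self::State);
    unsafe fn unlock(ptr: *mut Self::State);
    unsafe fn assert_is_held(ptr: *mut Self::State);
}

struct ToyBackend;

impl Backend for ToyBackend {
    // A "held" flag standing in for the C lock state.
    type State = AtomicBool;

    unsafe fn lock(ptr: *mut Self::State) {
        let was_held = unsafe { &*ptr }.swap(true, Ordering::Acquire);
        assert!(!was_held, "already held");
    }

    unsafe fn unlock(ptr: *mut Self::State) {
        unsafe { &*ptr }.store(false, Ordering::Release);
    }

    unsafe fn assert_is_held(ptr: *mut Self::State) {
        // Stand-in for lockdep_assert_held(): complain if the lock is
        // not currently held.
        assert!(unsafe { &*ptr }.load(Ordering::Relaxed), "lock not held");
    }
}

struct Guard<B: Backend> {
    state: *mut B::State,
    _backend: PhantomData<B>,
}

impl<B: Backend> Guard<B> {
    // Mirrors the kernel Guard::new(): the caller promises it owns the
    // lock, and the new assert_is_held() call double-checks that.
    unsafe fn new(state: *mut B::State) -> Self {
        unsafe { B::assert_is_held(state) };
        Self { state, _backend: PhantomData }
    }
}

impl<B: Backend> Drop for Guard<B> {
    fn drop(&mut self) {
        // The guard owns the lock for its whole lifetime, so release it here.
        unsafe { B::unlock(self.state) };
    }
}

fn main() {
    let mut lock = AtomicBool::new(false);
    let ptr: *mut AtomicBool = &mut lock;

    // The lock is taken "out of band" (think C code acquiring it) and a
    // guard is only constructed afterwards.
    unsafe { ToyBackend::lock(ptr) };
    let _guard = unsafe { Guard::<ToyBackend>::new(ptr) };

    // Constructing the guard without the lock() call above would trip
    // assert_is_held(), just as lockdep flags an unheld lock in the kernel.
}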
Signed-off-by: Lyude Paul <lyude@redhat.com>
---
V2
* Use lockdep instead of is_locked() functions
* Drop is_locked, replace with assert_is_held()
---
rust/helpers/mutex.c | 5 +++++
rust/helpers/spinlock.c | 5 +++++
rust/kernel/sync/lock.rs | 10 ++++++++++
rust/kernel/sync/lock/mutex.rs | 7 +++++++
rust/kernel/sync/lock/spinlock.rs | 7 +++++++
5 files changed, 34 insertions(+)
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
index 7e00680958ef1..06575553eda5c 100644
--- a/rust/helpers/mutex.c
+++ b/rust/helpers/mutex.c
@@ -12,3 +12,8 @@ void rust_helper___mutex_init(struct mutex *mutex, const char *name,
{
__mutex_init(mutex, name, key);
}
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
index b7b0945e8b3cb..b26953d0d65b5 100644
--- a/rust/helpers/spinlock.c
+++ b/rust/helpers/spinlock.c
@@ -26,3 +26,8 @@ int rust_helper_spin_trylock(spinlock_t *lock)
{
return spin_trylock(lock);
}
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+ lockdep_assert_held(lock);
+}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 6d3c8874eb26a..bc71fda87b104 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -85,6 +85,13 @@ unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
// SAFETY: The safety requirements ensure that the lock is initialised.
*guard_state = unsafe { Self::lock(ptr) };
}
+
+ /// Asserts that the lock is held using lockdep.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn assert_is_held(ptr: *mut Self::State);
}
/// A mutual exclusion primitive.
@@ -207,6 +214,9 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
///
/// The caller must ensure that it owns the lock.
pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+ unsafe { B::assert_is_held(lock.state.get()) };
+
Self {
lock,
state,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 0e946ebefce12..8ee079f6ae0ac 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -126,4 +126,11 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe {
+ bindings::mutex_assert_is_held(ptr);
+ }
+ }
}
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 9f4d128bed983..b2c1343aabee3 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -125,4 +125,11 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe {
+ bindings::spin_assert_is_held(ptr);
+ }
+ }
}
base-commit: b7ed2b6f4e8d7f64649795e76ee9db67300de8eb
--
2.47.0
On Mon, Nov 25, 2024 at 03:40:58PM -0500, Lyude Paul wrote:
> Since we've exposed Lock::from_raw() and Guard::new() publicly, we want
> to assert that a lock is actually held when constructing a Guard for it,
> so that unsafe Guard::new() calls outside of our lock module are caught.
>
> So, let's do this by adding a new method, assert_is_held(), to Backend,
> which uses lockdep to check whether a lock has been acquired. When
> lockdep is disabled, this has no overhead.
>
> Signed-off-by: Lyude Paul <lyude@redhat.com>
>
Queued for more testing and reviews, thanks!
Regards,
Boqun
The following commit has been merged into the locking/core branch of tip:
Commit-ID: fbd7a5a0359bc770e898d918d84977ea61163aad
Gitweb: https://git.kernel.org/tip/fbd7a5a0359bc770e898d918d84977ea61163aad
Author: Lyude Paul <lyude@redhat.com>
AuthorDate: Mon, 25 Nov 2024 15:40:58 -05:00
Committer: Boqun Feng <boqun.feng@gmail.com>
CommitterDate: Thu, 19 Dec 2024 14:04:42 -08:00
rust: sync: Add lock::Backend::assert_is_held()
Since we've exposed Lock::from_raw() and Guard::new() publicly, we
want to assert that a lock is actually held when constructing a Guard
for it, so that unsafe Guard::new() calls outside of our lock module
are caught.

Hence add a new method, assert_is_held(), to Backend, which uses
lockdep to check whether a lock has been acquired. When lockdep is
disabled, this has no overhead.
[Boqun: Resolve the conflicts with exposing Guard::new(), reword the
commit log a bit, and reformat "unsafe { <statement>; }" into
"unsafe { <statement> }" for consistency.]
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/20241125204139.656801-1-lyude@redhat.com
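For reference, the formatting tweak mentioned in the bracketed note only
concerns the trailing semicolon inside a single-statement unsafe block;
since the helpers return nothing, the two forms are equivalent. A minimal
illustration (check() is a stand-in, not one of the real bindings):

// check() plays the role of a unit-returning binding such as
// bindings::mutex_assert_is_held(); the two unsafe blocks below compile
// to the same thing.
unsafe fn check(_ptr: *mut i32) {}

fn main() {
    let mut x = 0;
    let ptr: *mut i32 = &mut x;
    unsafe { check(ptr) }  // style used in the merged version
    unsafe { check(ptr); } // style in the original posting
}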
---
rust/helpers/mutex.c | 5 +++++
rust/helpers/spinlock.c | 5 +++++
rust/kernel/sync/lock.rs | 10 ++++++++++
rust/kernel/sync/lock/mutex.rs | 5 +++++
rust/kernel/sync/lock/spinlock.rs | 5 +++++
5 files changed, 30 insertions(+)
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
index 7e00680..0657555 100644
--- a/rust/helpers/mutex.c
+++ b/rust/helpers/mutex.c
@@ -12,3 +12,8 @@ void rust_helper___mutex_init(struct mutex *mutex, const char *name,
{
__mutex_init(mutex, name, key);
}
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
index 5971fdf..42c4bf0 100644
--- a/rust/helpers/spinlock.c
+++ b/rust/helpers/spinlock.c
@@ -30,3 +30,8 @@ int rust_helper_spin_trylock(spinlock_t *lock)
{
return spin_trylock(lock);
}
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+ lockdep_assert_held(lock);
+}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 72dbf3f..eb80048 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -90,6 +90,13 @@ pub unsafe trait Backend {
// SAFETY: The safety requirements ensure that the lock is initialised.
*guard_state = unsafe { Self::lock(ptr) };
}
+
+ /// Asserts that the lock is held using lockdep.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn assert_is_held(ptr: *mut Self::State);
}
/// A mutual exclusion primitive.
@@ -235,6 +242,9 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
///
/// The caller must ensure that it owns the lock.
pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+ unsafe { B::assert_is_held(lock.state.get()) };
+
Self {
lock,
state,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 10a70c0..70cadbc 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -134,4 +134,9 @@ unsafe impl super::Backend for MutexBackend {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::mutex_assert_is_held(ptr) }
+ }
}
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 081c022..ab2f8d0 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -133,4 +133,9 @@ unsafe impl super::Backend for SpinLockBackend {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::spin_assert_is_held(ptr) }
+ }
}