[PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`

Posted by Onur Özkan 5 months, 1 week ago
Includes full locking API (lock, try_lock, slow path, interruptible variants)
and integration with kernel bindings.

Also adds the `EDEADLK` error code to support deadlock detection.

Signed-off-by: Onur Özkan <work@onurozkan.dev>
---
 rust/kernel/error.rs              |   1 +
 rust/kernel/sync/lock/ww_mutex.rs | 289 +++++++++++++++++++++++++++++-
 2 files changed, 289 insertions(+), 1 deletion(-)

diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index a41de293dcd1..560de6117094 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -64,6 +64,7 @@ macro_rules! declare_err {
     declare_err!(EPIPE, "Broken pipe.");
     declare_err!(EDOM, "Math argument out of domain of func.");
     declare_err!(ERANGE, "Math result not representable.");
+    declare_err!(EDEADLK, "Resource deadlock avoided.");
     declare_err!(EOVERFLOW, "Value too large for defined data type.");
     declare_err!(ETIMEDOUT, "Connection timed out.");
     declare_err!(ERESTARTSYS, "Restart the system call.");
diff --git a/rust/kernel/sync/lock/ww_mutex.rs b/rust/kernel/sync/lock/ww_mutex.rs
index ca5b4525ace6..314360632953 100644
--- a/rust/kernel/sync/lock/ww_mutex.rs
+++ b/rust/kernel/sync/lock/ww_mutex.rs
@@ -10,8 +10,11 @@
 //! For more information: <https://docs.kernel.org/locking/ww-mutex-design.html>

 use crate::bindings;
+use crate::error::to_result;
 use crate::prelude::*;
-use crate::types::Opaque;
+use crate::types::{NotThreadSafe, Opaque};
+use core::cell::UnsafeCell;
+use core::marker::PhantomData;

 /// Create static [`WwClass`] instances.
 ///
@@ -134,3 +137,287 @@ pub fn new_wound_wait(name: &'static CStr) -> impl PinInit<Self> {
         Self::new(name, false)
     }
 }
+
+/// Groups multiple mutex acquisitions together for deadlock avoidance.
+///
+/// Must be used when acquiring multiple mutexes of the same class.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex};
+/// use kernel::c_str;
+/// use kernel::sync::Arc;
+/// use pin_init::stack_pin_init;
+///
+/// stack_pin_init!(let class = WwClass::new_wound_wait(c_str!("my_class")));
+///
+/// // Create mutexes.
+/// let mutex1 = Arc::pin_init(WwMutex::new(1, &class), GFP_KERNEL)?;
+/// let mutex2 = Arc::pin_init(WwMutex::new(2, &class), GFP_KERNEL)?;
+///
+/// // Create acquire context for deadlock avoidance.
+/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
+///
+/// // Acquire multiple locks safely.
+/// let guard1 = ctx.lock(&mutex1)?;
+/// let guard2 = ctx.lock(&mutex2)?;
+///
+/// // Mark acquisition phase as complete.
+/// ctx.done();
+///
+/// # Ok::<(), Error>(())
+/// ```
+#[pin_data(PinnedDrop)]
+pub struct WwAcquireCtx<'a> {
+    #[pin]
+    inner: Opaque<bindings::ww_acquire_ctx>,
+    _p: PhantomData<&'a WwClass>,
+}
+
+impl<'ww_class> WwAcquireCtx<'ww_class> {
+    /// Initializes `Self` with calling C side `ww_acquire_init` inside.
+    pub fn new(ww_class: &'ww_class WwClass) -> impl PinInit<Self> {
+        let class = ww_class.inner.get();
+        pin_init!(WwAcquireCtx {
+            inner <- Opaque::ffi_init(|slot: *mut bindings::ww_acquire_ctx| {
+                // SAFETY: `ww_class` is valid for the lifetime `'ww_class` captured by `Self`.
+                unsafe { bindings::ww_acquire_init(slot, class) }
+            }),
+            _p: PhantomData
+        })
+    }
+
+    /// Marks the end of the acquire phase.
+    ///
+    /// After calling this function, no more mutexes can be acquired with this context.
+    pub fn done(&self) {
+        // SAFETY: The context is pinned and valid.
+        unsafe { bindings::ww_acquire_done(self.inner.get()) };
+    }
+
+    /// Locks the given mutex.
+    pub fn lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {
+        // SAFETY: The mutex is pinned and valid.
+        let ret = unsafe { bindings::ww_mutex_lock(ww_mutex.mutex.get(), self.inner.get()) };
+
+        to_result(ret)?;
+
+        Ok(WwMutexGuard::new(ww_mutex))
+    }
+
+    /// Similar to `lock`, but can be interrupted by signals.
+    pub fn lock_interruptible<'a, T>(
+        &'a self,
+        ww_mutex: &'a WwMutex<'a, T>,
+    ) -> Result<WwMutexGuard<'a, T>> {
+        // SAFETY: The mutex is pinned and valid.
+        let ret = unsafe {
+            bindings::ww_mutex_lock_interruptible(ww_mutex.mutex.get(), self.inner.get())
+        };
+
+        to_result(ret)?;
+
+        Ok(WwMutexGuard::new(ww_mutex))
+    }
+
+    /// Locks the given mutex using the slow path.
+    ///
+    /// This function should be used when `lock` fails (typically due to a potential deadlock).
+    pub fn lock_slow<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {
+        // SAFETY: The mutex is pinned and valid, and we're in the slow path.
+        unsafe { bindings::ww_mutex_lock_slow(ww_mutex.mutex.get(), self.inner.get()) };
+
+        Ok(WwMutexGuard::new(ww_mutex))
+    }
+
+    /// Similar to `lock_slow`, but can be interrupted by signals.
+    pub fn lock_slow_interruptible<'a, T>(
+        &'a self,
+        ww_mutex: &'a WwMutex<'a, T>,
+    ) -> Result<WwMutexGuard<'a, T>> {
+        // SAFETY: The mutex is pinned and valid, and we are in the slow path.
+        let ret = unsafe {
+            bindings::ww_mutex_lock_slow_interruptible(ww_mutex.mutex.get(), self.inner.get())
+        };
+
+        to_result(ret)?;
+
+        Ok(WwMutexGuard::new(ww_mutex))
+    }
+
+    /// Tries to lock the mutex without blocking.
+    ///
+    /// Unlike `lock`, no deadlock handling is performed.
+    pub fn try_lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {
+        // SAFETY: The mutex is pinned and valid.
+        let ret = unsafe { bindings::ww_mutex_trylock(ww_mutex.mutex.get(), self.inner.get()) };
+
+        if ret == 0 {
+            return Err(EBUSY);
+        } else {
+            to_result(ret)?;
+        }
+
+        Ok(WwMutexGuard::new(ww_mutex))
+    }
+}
+
+#[pinned_drop]
+impl PinnedDrop for WwAcquireCtx<'_> {
+    fn drop(self: Pin<&mut Self>) {
+        // SAFETY: The context is being dropped and is pinned.
+        unsafe { bindings::ww_acquire_fini(self.inner.get()) };
+    }
+}
+
+/// A wound/wait mutex backed with C side `ww_mutex`.
+///
+/// This is a mutual exclusion primitive that provides deadlock avoidance when
+/// acquiring multiple locks of the same class.
+///
+/// # Examples
+///
+/// ## Basic Usage
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::sync::Arc;
+/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex };
+/// use pin_init::stack_pin_init;
+///
+/// stack_pin_init!(let class = WwClass::new_wound_wait(c_str!("buffer_class")));
+/// let mutex = Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?;
+///
+/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
+///
+/// let guard = ctx.lock(&mutex)?;
+/// assert_eq!(*guard, 42);
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// ## Multiple Locks
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::prelude::*;
+/// use kernel::sync::Arc;
+/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex};
+/// use pin_init::stack_pin_init;
+///
+/// stack_pin_init!(let class = WwClass::new_wait_die(c_str!("resource_class")));
+/// let mutex_a = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
+/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B", &class), GFP_KERNEL)?;
+///
+/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
+///
+/// // Try to acquire both locks.
+/// let guard_a = match ctx.lock(&mutex_a) {
+///     Ok(guard) => guard,
+///     Err(e) if e == EDEADLK => {
+///         // Deadlock detected, use slow path.
+///         ctx.lock_slow(&mutex_a)?
+///     }
+///     Err(e) => return Err(e),
+/// };
+///
+/// let guard_b = ctx.lock(&mutex_b)?;
+/// ctx.done();
+///
+/// # Ok::<(), Error>(())
+/// ```
+#[pin_data]
+pub struct WwMutex<'a, T: ?Sized> {
+    _p: PhantomData<&'a WwClass>,
+    #[pin]
+    mutex: Opaque<bindings::ww_mutex>,
+    data: UnsafeCell<T>,
+}
+
+// SAFETY: [`WwMutex`] can be shared between threads.
+unsafe impl<T: ?Sized + Send> Send for WwMutex<'_, T> {}
+// SAFETY: [`WwMutex`] can be safely accessed from multiple threads concurrently.
+unsafe impl<T: ?Sized + Send + Sync> Sync for WwMutex<'_, T> {}
+
+impl<'ww_class, T> WwMutex<'ww_class, T> {
+    /// Creates `Self` with calling `ww_mutex_init` inside.
+    pub fn new(t: T, ww_class: &'ww_class WwClass) -> impl PinInit<Self> {
+        let class = ww_class.inner.get();
+        pin_init!(WwMutex {
+            mutex <- Opaque::ffi_init(|slot: *mut bindings::ww_mutex| {
+                // SAFETY: `ww_class` is valid for the lifetime `'ww_class` captured by `Self`.
+                unsafe { bindings::ww_mutex_init(slot, class) }
+            }),
+            data: UnsafeCell::new(t),
+            _p: PhantomData,
+        })
+    }
+}
+
+impl<T: ?Sized> WwMutex<'_, T> {
+    /// Returns a raw pointer to the inner mutex.
+    fn as_ptr(&self) -> *mut bindings::ww_mutex {
+        self.mutex.get()
+    }
+
+    /// Checks if the mutex is currently locked.
+    ///
+    /// Intended for internal tests only and should not be used
+    /// anywhere else.
+    #[cfg(CONFIG_KUNIT)]
+    fn is_locked(&self) -> bool {
+        // SAFETY: The mutex is pinned and valid.
+        unsafe { bindings::ww_mutex_is_locked(self.mutex.get()) }
+    }
+}
+
+/// A guard that provides exclusive access to the data protected
+/// by a [`WwMutex`].
+///
+/// # Invariants
+///
+/// The guard holds an exclusive lock on the associated [`WwMutex`]. The lock is held
+/// for the entire lifetime of this guard and is automatically released when the
+/// guard is dropped.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct WwMutexGuard<'a, T: ?Sized> {
+    mutex: &'a WwMutex<'a, T>,
+    _not_send: NotThreadSafe,
+}
+
+// SAFETY: [`WwMutexGuard`] can be shared between threads if the data can.
+unsafe impl<T: ?Sized + Sync> Sync for WwMutexGuard<'_, T> {}
+
+impl<'a, T: ?Sized> WwMutexGuard<'a, T> {
+    /// Creates a new guard for a locked mutex.
+    fn new(mutex: &'a WwMutex<'a, T>) -> Self {
+        Self {
+            mutex,
+            _not_send: NotThreadSafe,
+        }
+    }
+}
+
+impl<T: ?Sized> core::ops::Deref for WwMutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: We hold the lock, so we have exclusive access.
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<T: ?Sized> core::ops::DerefMut for WwMutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY: We hold the lock, so we have exclusive access.
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<T: ?Sized> Drop for WwMutexGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: We hold the lock and are about to release it.
+        unsafe { bindings::ww_mutex_unlock(self.mutex.as_ptr()) };
+    }
+}
--
2.50.0

Re: [PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`
Posted by Daniel Almeida 5 months ago
Hi Onur,

I think this is starting to come together. Some comments inline.

> On 3 Sep 2025, at 10:13, Onur Özkan <work@onurozkan.dev> wrote:
> 
> Includes full locking API (lock, try_lock, slow path, interruptible variants)
> and integration with kernel bindings.
> 
> Also adds the `EDEADLK` error code to support deadlock detection.
> 
> Signed-off-by: Onur Özkan <work@onurozkan.dev>
> ---
> rust/kernel/error.rs              |   1 +
> rust/kernel/sync/lock/ww_mutex.rs | 289 +++++++++++++++++++++++++++++-
> 2 files changed, 289 insertions(+), 1 deletion(-)
> 
> diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
> index a41de293dcd1..560de6117094 100644
> --- a/rust/kernel/error.rs
> +++ b/rust/kernel/error.rs
> @@ -64,6 +64,7 @@ macro_rules! declare_err {
>     declare_err!(EPIPE, "Broken pipe.");
>     declare_err!(EDOM, "Math argument out of domain of func.");
>     declare_err!(ERANGE, "Math result not representable.");
> +    declare_err!(EDEADLK, "Resource deadlock avoided.");
>     declare_err!(EOVERFLOW, "Value too large for defined data type.");
>     declare_err!(ETIMEDOUT, "Connection timed out.");
>     declare_err!(ERESTARTSYS, "Restart the system call.");
> diff --git a/rust/kernel/sync/lock/ww_mutex.rs b/rust/kernel/sync/lock/ww_mutex.rs
> index ca5b4525ace6..314360632953 100644
> --- a/rust/kernel/sync/lock/ww_mutex.rs
> +++ b/rust/kernel/sync/lock/ww_mutex.rs
> @@ -10,8 +10,11 @@
> //! For more information: <https://docs.kernel.org/locking/ww-mutex-design.html>
> 
> use crate::bindings;
> +use crate::error::to_result;
> use crate::prelude::*;
> -use crate::types::Opaque;
> +use crate::types::{NotThreadSafe, Opaque};
> +use core::cell::UnsafeCell;
> +use core::marker::PhantomData;
> 
> /// Create static [`WwClass`] instances.
> ///
> @@ -134,3 +137,287 @@ pub fn new_wound_wait(name: &'static CStr) -> impl PinInit<Self> {
>         Self::new(name, false)
>     }
> }
> +
> +/// Groups multiple mutex acquisitions together for deadlock avoidance.
> +///
> +/// Must be used when acquiring multiple mutexes of the same class.
> +///
> +/// # Examples
> +///
> +/// ```
> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex};
> +/// use kernel::c_str;
> +/// use kernel::sync::Arc;
> +/// use pin_init::stack_pin_init;
> +///
> +/// stack_pin_init!(let class = WwClass::new_wound_wait(c_str!("my_class")));
> +///
> +/// // Create mutexes.
> +/// let mutex1 = Arc::pin_init(WwMutex::new(1, &class), GFP_KERNEL)?;
> +/// let mutex2 = Arc::pin_init(WwMutex::new(2, &class), GFP_KERNEL)?;
> +///
> +/// // Create acquire context for deadlock avoidance.
> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
> +///
> +/// // Acquire multiple locks safely.
> +/// let guard1 = ctx.lock(&mutex1)?;
> +/// let guard2 = ctx.lock(&mutex2)?;
> +///
> +/// // Mark acquisition phase as complete.
> +/// ctx.done();
> +///
> +/// # Ok::<(), Error>(())
> +/// ```
> +#[pin_data(PinnedDrop)]
> +pub struct WwAcquireCtx<'a> {

Please drop the Ww prefix. This is ww_mutex.rs after all.

> +    #[pin]
> +    inner: Opaque<bindings::ww_acquire_ctx>,
> +    _p: PhantomData<&'a WwClass>,
> +}
> +
> +impl<'ww_class> WwAcquireCtx<'ww_class> {
> +    /// Initializes `Self` with calling C side `ww_acquire_init` inside.
> +    pub fn new(ww_class: &'ww_class WwClass) -> impl PinInit<Self> {
> +        let class = ww_class.inner.get();
> +        pin_init!(WwAcquireCtx {
> +            inner <- Opaque::ffi_init(|slot: *mut bindings::ww_acquire_ctx| {
> +                // SAFETY: `ww_class` is valid for the lifetime `'ww_class` captured by `Self`.
> +                unsafe { bindings::ww_acquire_init(slot, class) }
> +            }),
> +            _p: PhantomData
> +        })
> +    }
> +
> +    /// Marks the end of the acquire phase.
> +    ///
> +    /// After calling this function, no more mutexes can be acquired with this context.
> +    pub fn done(&self) {
> +        // SAFETY: The context is pinned and valid.
> +        unsafe { bindings::ww_acquire_done(self.inner.get()) };
> +    }

This lets you call done() multiple times. We should probably use a typestate here.
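
Something like this, just to illustrate the idea (type and method names
are made up here, and I am glossing over the pinning details):

    /// Marker types for the two phases of the context.
    pub struct Acquiring;
    pub struct Done;

    pub struct AcquireCtx<State = Acquiring> {
        // In the real code this wraps Opaque<bindings::ww_acquire_ctx>.
        _state: core::marker::PhantomData<State>,
    }

    impl AcquireCtx<Acquiring> {
        pub fn new() -> Self {
            AcquireCtx { _state: core::marker::PhantomData }
        }

        /// Locking is only available in the acquiring state.
        pub fn lock(&self) {
            // ww_mutex_lock(...) in the real code.
        }

        /// Consuming `self` makes a second done() a compile error, and
        /// lock() is simply not available on the returned type.
        pub fn done(self) -> AcquireCtx<Done> {
            // ww_acquire_done(...) in the real code.
            AcquireCtx { _state: core::marker::PhantomData }
        }
    }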

> +
> +    /// Locks the given mutex.
> +    pub fn lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {

> +        // SAFETY: The mutex is pinned and valid.
> +        let ret = unsafe { bindings::ww_mutex_lock(ww_mutex.mutex.get(), self.inner.get()) };
> +
> +        to_result(ret)?;
> +
> +        Ok(WwMutexGuard::new(ww_mutex))
> +    }
> +
> +    /// Similar to `lock`, but can be interrupted by signals.
> +    pub fn lock_interruptible<'a, T>(
> +        &'a self,
> +        ww_mutex: &'a WwMutex<'a, T>,
> +    ) -> Result<WwMutexGuard<'a, T>> {
> +        // SAFETY: The mutex is pinned and valid.
> +        let ret = unsafe {
> +            bindings::ww_mutex_lock_interruptible(ww_mutex.mutex.get(), self.inner.get())
> +        };
> +
> +        to_result(ret)?;
> +
> +        Ok(WwMutexGuard::new(ww_mutex))
> +    }
> +
> +    /// Locks the given mutex using the slow path.
> +    ///
> +    /// This function should be used when `lock` fails (typically due to a potential deadlock).
> +    pub fn lock_slow<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {
> +        // SAFETY: The mutex is pinned and valid, and we're in the slow path.
> +        unsafe { bindings::ww_mutex_lock_slow(ww_mutex.mutex.get(), self.inner.get()) };
> +
> +        Ok(WwMutexGuard::new(ww_mutex))
> +    }
> +
> +    /// Similar to `lock_slow`, but can be interrupted by signals.
> +    pub fn lock_slow_interruptible<'a, T>(
> +        &'a self,
> +        ww_mutex: &'a WwMutex<'a, T>,
> +    ) -> Result<WwMutexGuard<'a, T>> {
> +        // SAFETY: The mutex is pinned and valid, and we are in the slow path.
> +        let ret = unsafe {
> +            bindings::ww_mutex_lock_slow_interruptible(ww_mutex.mutex.get(), self.inner.get())
> +        };
> +
> +        to_result(ret)?;
> +
> +        Ok(WwMutexGuard::new(ww_mutex))
> +    }
> +
> +    /// Tries to lock the mutex without blocking.
> +    ///
> +    /// Unlike `lock`, no deadlock handling is performed.
> +    pub fn try_lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) -> Result<WwMutexGuard<'a, T>> {
> +        // SAFETY: The mutex is pinned and valid.
> +        let ret = unsafe { bindings::ww_mutex_trylock(ww_mutex.mutex.get(), self.inner.get()) };
> +
> +        if ret == 0 {
> +            return Err(EBUSY);
> +        } else {
> +            to_result(ret)?;
> +        }
> +
> +        Ok(WwMutexGuard::new(ww_mutex))
> +    }
> +}
> +
> +#[pinned_drop]
> +impl PinnedDrop for WwAcquireCtx<'_> {
> +    fn drop(self: Pin<&mut Self>) {
> +        // SAFETY: The context is being dropped and is pinned.
> +        unsafe { bindings::ww_acquire_fini(self.inner.get()) };
> +    }
> +}
> +
> +/// A wound/wait mutex backed with C side `ww_mutex`.
> +///
> +/// This is a mutual exclusion primitive that provides deadlock avoidance when
> +/// acquiring multiple locks of the same class.

A link would be cool for the docs.

> +///
> +/// # Examples
> +///
> +/// ## Basic Usage
> +///
> +/// ```
> +/// use kernel::c_str;
> +/// use kernel::sync::Arc;
> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex };
> +/// use pin_init::stack_pin_init;
> +///
> +/// stack_pin_init!(let class = WwClass::new_wound_wait(c_str!("buffer_class")));
> +/// let mutex = Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?;
> +///
> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
> +///
> +/// let guard = ctx.lock(&mutex)?;
> +/// assert_eq!(*guard, 42);
> +///
> +/// # Ok::<(), Error>(())
> +/// ```
> +///
> +/// ## Multiple Locks
> +///
> +/// ```
> +/// use kernel::c_str;
> +/// use kernel::prelude::*;
> +/// use kernel::sync::Arc;
> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex};
> +/// use pin_init::stack_pin_init;
> +///
> +/// stack_pin_init!(let class = WwClass::new_wait_die(c_str!("resource_class")));
> +/// let mutex_a = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
> +/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B", &class), GFP_KERNEL)?;
> +///
> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
> +///
> +/// // Try to acquire both locks.
> +/// let guard_a = match ctx.lock(&mutex_a) {
> +///     Ok(guard) => guard,
> +///     Err(e) if e == EDEADLK => {
> +///         // Deadlock detected, use slow path.

You must release all other locks before calling this, except there aren’t any taken in your example.

You should perhaps add a release_all() function to the context.
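
For reference, the usual backoff pattern against the API in this patch
would look roughly like this (just a sketch, assuming the kernel
prelude is in scope; a real caller would retry in a loop):

    fn lock_both<'a>(
        ctx: &'a WwAcquireCtx<'a>,
        a: &'a WwMutex<'a, u32>,
        b: &'a WwMutex<'a, u32>,
    ) -> Result<(WwMutexGuard<'a, u32>, WwMutexGuard<'a, u32>)> {
        let guard_a = ctx.lock(a)?;
        match ctx.lock(b) {
            Ok(guard_b) => Ok((guard_a, guard_b)),
            Err(e) if e == EDEADLK => {
                // Back off: release everything already held on this
                // context before sleeping on the contended mutex.
                drop(guard_a);
                // The slow path blocks until it is our turn...
                let guard_b = ctx.lock_slow(b)?;
                // ...and then the other locks are re-acquired. A real
                // caller would loop in case this fails with EDEADLK
                // again.
                let guard_a = ctx.lock(a)?;
                Ok((guard_a, guard_b))
            }
            Err(e) => Err(e),
        }
    }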

> +///         ctx.lock_slow(&mutex_a)?
> +///     }
> +///     Err(e) => return Err(e),
> +/// };
> +///
> +/// let guard_b = ctx.lock(&mutex_b)?;
> +/// ctx.done();
> +///
> +/// # Ok::<(), Error>(())
> +/// ```
> +#[pin_data]
> +pub struct WwMutex<'a, T: ?Sized> {
> +    _p: PhantomData<&'a WwClass>,

Make the PhantomData last, please.

> +    #[pin]
> +    mutex: Opaque<bindings::ww_mutex>,
> +    data: UnsafeCell<T>,
> +}
> +
> +// SAFETY: [`WwMutex`] can be shared between threads.
> +unsafe impl<T: ?Sized + Send> Send for WwMutex<'_, T> {}

“Send” does not share anything. When you send something, some other
thread has it, and you don’t have it anymore.

Missing a blank line here.

> +// SAFETY: [`WwMutex`] can be safely accessed from multiple threads concurrently.
> +unsafe impl<T: ?Sized + Send + Sync> Sync for WwMutex<'_, T> {}
> +
> +impl<'ww_class, T> WwMutex<'ww_class, T> {
> +    /// Creates `Self` with calling `ww_mutex_init` inside.

^ This does not parse very well.

> +    pub fn new(t: T, ww_class: &'ww_class WwClass) -> impl PinInit<Self> {

Please rename “t” to “data”.

> +        let class = ww_class.inner.get();
> +        pin_init!(WwMutex {
> +            mutex <- Opaque::ffi_init(|slot: *mut bindings::ww_mutex| {
> +                // SAFETY: `ww_class` is valid for the lifetime `'ww_class` captured by `Self`.
> +                unsafe { bindings::ww_mutex_init(slot, class) }
> +            }),
> +            data: UnsafeCell::new(t),
> +            _p: PhantomData,
> +        })
> +    }
> +}
> +
> +impl<T: ?Sized> WwMutex<'_, T> {

I wonder why we need this ?Sized here?

> +    /// Returns a raw pointer to the inner mutex.
> +    fn as_ptr(&self) -> *mut bindings::ww_mutex {
> +        self.mutex.get()
> +    }
> +
> +    /// Checks if the mutex is currently locked.
> +    ///
> +    /// Intended for internal tests only and should not be used
> +    /// anywhere else.

Why?

> +    #[cfg(CONFIG_KUNIT)]
> +    fn is_locked(&self) -> bool {

I’d recommend removing this CONFIG_KUNIT and making this pub. You can see
that there are users for this function in the C code, like for example,
dma_resv_is_locked().

> +        // SAFETY: The mutex is pinned and valid.
> +        unsafe { bindings::ww_mutex_is_locked(self.mutex.get()) }
> +    }
> +}
> +
> +/// A guard that provides exclusive access to the data protected
> +/// by a [`WwMutex`].
> +///
> +/// # Invariants
> +///
> +/// The guard holds an exclusive lock on the associated [`WwMutex`]. The lock is held
> +/// for the entire lifetime of this guard and is automatically released when the
> +/// guard is dropped.
> +#[must_use = "the lock unlocks immediately when the guard is unused"]
> +pub struct WwMutexGuard<'a, T: ?Sized> {
> +    mutex: &'a WwMutex<'a, T>,
> +    _not_send: NotThreadSafe,
> +}
> +
> +// SAFETY: [`WwMutexGuard`] can be shared between threads if the data can.
> +unsafe impl<T: ?Sized + Sync> Sync for WwMutexGuard<'_, T> {}
> +
> +impl<'a, T: ?Sized> WwMutexGuard<'a, T> {
> +    /// Creates a new guard for a locked mutex.
> +    fn new(mutex: &'a WwMutex<'a, T>) -> Self {
> +        Self {
> +            mutex,
> +            _not_send: NotThreadSafe,
> +        }
> +    }
> +}
> +
> +impl<T: ?Sized> core::ops::Deref for WwMutexGuard<'_, T> {
> +    type Target = T;
> +
> +    fn deref(&self) -> &Self::Target {
> +        // SAFETY: We hold the lock, so we have exclusive access.
> +        unsafe { &*self.mutex.data.get() }
> +    }
> +}
> +
> +impl<T: ?Sized> core::ops::DerefMut for WwMutexGuard<'_, T> {
> +    fn deref_mut(&mut self) -> &mut Self::Target {
> +        // SAFETY: We hold the lock, so we have exclusive access.
> +        unsafe { &mut *self.mutex.data.get() }
> +    }

We need to add a bound on Unpin. See [0].
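
I.e. something along these lines (sketch only), plus a separate pinned
accessor for the !Unpin case:

    // DerefMut only when T: Unpin, so a pinned T cannot be moved out
    // of the mutex through the guard.
    impl<T: ?Sized + Unpin> core::ops::DerefMut for WwMutexGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            // SAFETY: We hold the lock, so we have exclusive access.
            unsafe { &mut *self.mutex.data.get() }
        }
    }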

> +}
> +
> +impl<T: ?Sized> Drop for WwMutexGuard<'_, T> {
> +    fn drop(&mut self) {
> +        // SAFETY: We hold the lock and are about to release it.
> +        unsafe { bindings::ww_mutex_unlock(self.mutex.as_ptr()) };
> +    }
> +}
> --
> 2.50.0
> 
> 

[0]: https://lore.kernel.org/rust-for-linux/20250828-lock-t-when-t-is-pinned-v2-1-b067c4b93fd6@collabora.com/
Re: [PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`
Posted by Onur 5 months ago
On Fri, 5 Sep 2025 15:49:57 -0300
Daniel Almeida <daniel.almeida@collabora.com> wrote:

> Hi Onur,
> 
> I think this is starting to come together IMHO. Some comments inline.
> 
> > On 3 Sep 2025, at 10:13, Onur Özkan <work@onurozkan.dev> wrote:
> > 
> > Includes full locking API (lock, try_lock, slow path, interruptible
> > variants) and integration with kernel bindings.
> > 
> > Also adds the `EDEADLK` error code to support deadlock detection.
> > 
> > Signed-off-by: Onur Özkan <work@onurozkan.dev>
> > ---
> > rust/kernel/error.rs              |   1 +
> > rust/kernel/sync/lock/ww_mutex.rs | 289
> > +++++++++++++++++++++++++++++- 2 files changed, 289 insertions(+),
> > 1 deletion(-)
> > 
> > diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
> > index a41de293dcd1..560de6117094 100644
> > --- a/rust/kernel/error.rs
> > +++ b/rust/kernel/error.rs
> > @@ -64,6 +64,7 @@ macro_rules! declare_err {
> >     declare_err!(EPIPE, "Broken pipe.");
> >     declare_err!(EDOM, "Math argument out of domain of func.");
> >     declare_err!(ERANGE, "Math result not representable.");
> > +    declare_err!(EDEADLK, "Resource deadlock avoided.");
> >     declare_err!(EOVERFLOW, "Value too large for defined data
> > type."); declare_err!(ETIMEDOUT, "Connection timed out.");
> >     declare_err!(ERESTARTSYS, "Restart the system call.");
> > diff --git a/rust/kernel/sync/lock/ww_mutex.rs
> > b/rust/kernel/sync/lock/ww_mutex.rs index
> > ca5b4525ace6..314360632953 100644 ---
> > a/rust/kernel/sync/lock/ww_mutex.rs +++
> > b/rust/kernel/sync/lock/ww_mutex.rs @@ -10,8 +10,11 @@
> > //! For more information:
> > <https://docs.kernel.org/locking/ww-mutex-design.html>
> > 
> > use crate::bindings;
> > +use crate::error::to_result;
> > use crate::prelude::*;
> > -use crate::types::Opaque;
> > +use crate::types::{NotThreadSafe, Opaque};
> > +use core::cell::UnsafeCell;
> > +use core::marker::PhantomData;
> > 
> > /// Create static [`WwClass`] instances.
> > ///
> > @@ -134,3 +137,287 @@ pub fn new_wound_wait(name: &'static CStr) ->
> > impl PinInit<Self> { Self::new(name, false)
> >     }
> > }
> > +
> > +/// Groups multiple mutex acquisitions together for deadlock
> > avoidance. +///
> > +/// Must be used when acquiring multiple mutexes of the same class.
> > +///
> > +/// # Examples
> > +///
> > +/// ```
> > +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> > WwMutex}; +/// use kernel::c_str;
> > +/// use kernel::sync::Arc;
> > +/// use pin_init::stack_pin_init;
> > +///
> > +/// stack_pin_init!(let class =
> > WwClass::new_wound_wait(c_str!("my_class"))); +///
> > +/// // Create mutexes.
> > +/// let mutex1 = Arc::pin_init(WwMutex::new(1, &class),
> > GFP_KERNEL)?; +/// let mutex2 = Arc::pin_init(WwMutex::new(2,
> > &class), GFP_KERNEL)?; +///
> > +/// // Create acquire context for deadlock avoidance.
> > +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class),
> > GFP_KERNEL)?; +///
> > +/// // Acquire multiple locks safely.
> > +/// let guard1 = ctx.lock(&mutex1)?;
> > +/// let guard2 = ctx.lock(&mutex2)?;
> > +///
> > +/// // Mark acquisition phase as complete.
> > +/// ctx.done();
> > +///
> > +/// # Ok::<(), Error>(())
> > +/// ```
> > +#[pin_data(PinnedDrop)]
> > +pub struct WwAcquireCtx<'a> {
> 
> Please drop the Ww prefix. This is ww_mutex.rs
> after all.
> 
> > +    #[pin]
> > +    inner: Opaque<bindings::ww_acquire_ctx>,
> > +    _p: PhantomData<&'a WwClass>,
> > +}
> > +
> > +impl<'ww_class> WwAcquireCtx<'ww_class> {
> > +    /// Initializes `Self` with calling C side `ww_acquire_init`
> > inside.
> > +    pub fn new(ww_class: &'ww_class WwClass) -> impl PinInit<Self>
> > {
> > +        let class = ww_class.inner.get();
> > +        pin_init!(WwAcquireCtx {
> > +            inner <- Opaque::ffi_init(|slot: *mut
> > bindings::ww_acquire_ctx| {
> > +                // SAFETY: `ww_class` is valid for the lifetime
> > `'ww_class` captured by `Self`.
> > +                unsafe { bindings::ww_acquire_init(slot, class) }
> > +            }),
> > +            _p: PhantomData
> > +        })
> > +    }
> > +
> > +    /// Marks the end of the acquire phase.
> > +    ///
> > +    /// After calling this function, no more mutexes can be
> > acquired with this context.
> > +    pub fn done(&self) {
> > +        // SAFETY: The context is pinned and valid.
> > +        unsafe { bindings::ww_acquire_done(self.inner.get()) };
> > +    }
> 
> This lets you call done() multiple times. We should probably use a
> typestate here.
> 

Good point. I will see what I can do.

> > +
> > +    /// Locks the given mutex.
> > +    pub fn lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>) ->
> > Result<WwMutexGuard<'a, T>> {
> 
> > +        // SAFETY: The mutex is pinned and valid.
> > +        let ret = unsafe {
> > bindings::ww_mutex_lock(ww_mutex.mutex.get(), self.inner.get()) }; +
> > +        to_result(ret)?;
> > +
> > +        Ok(WwMutexGuard::new(ww_mutex))
> > +    }
> > +
> > +    /// Similar to `lock`, but can be interrupted by signals.
> > +    pub fn lock_interruptible<'a, T>(
> > +        &'a self,
> > +        ww_mutex: &'a WwMutex<'a, T>,
> > +    ) -> Result<WwMutexGuard<'a, T>> {
> > +        // SAFETY: The mutex is pinned and valid.
> > +        let ret = unsafe {
> > +
> > bindings::ww_mutex_lock_interruptible(ww_mutex.mutex.get(),
> > self.inner.get())
> > +        };
> > +
> > +        to_result(ret)?;
> > +
> > +        Ok(WwMutexGuard::new(ww_mutex))
> > +    }
> > +
> > +    /// Locks the given mutex using the slow path.
> > +    ///
> > +    /// This function should be used when `lock` fails (typically
> > due to a potential deadlock).
> > +    pub fn lock_slow<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a,
> > T>) -> Result<WwMutexGuard<'a, T>> {
> > +        // SAFETY: The mutex is pinned and valid, and we're in the
> > slow path.
> > +        unsafe {
> > bindings::ww_mutex_lock_slow(ww_mutex.mutex.get(),
> > self.inner.get()) }; +
> > +        Ok(WwMutexGuard::new(ww_mutex))
> > +    }
> > +
> > +    /// Similar to `lock_slow`, but can be interrupted by signals.
> > +    pub fn lock_slow_interruptible<'a, T>(
> > +        &'a self,
> > +        ww_mutex: &'a WwMutex<'a, T>,
> > +    ) -> Result<WwMutexGuard<'a, T>> {
> > +        // SAFETY: The mutex is pinned and valid, and we are in
> > the slow path.
> > +        let ret = unsafe {
> > +
> > bindings::ww_mutex_lock_slow_interruptible(ww_mutex.mutex.get(),
> > self.inner.get())
> > +        };
> > +
> > +        to_result(ret)?;
> > +
> > +        Ok(WwMutexGuard::new(ww_mutex))
> > +    }
> > +
> > +    /// Tries to lock the mutex without blocking.
> > +    ///
> > +    /// Unlike `lock`, no deadlock handling is performed.
> > +    pub fn try_lock<'a, T>(&'a self, ww_mutex: &'a WwMutex<'a, T>)
> > -> Result<WwMutexGuard<'a, T>> {
> > +        // SAFETY: The mutex is pinned and valid.
> > +        let ret = unsafe {
> > bindings::ww_mutex_trylock(ww_mutex.mutex.get(), self.inner.get())
> > }; +
> > +        if ret == 0 {
> > +            return Err(EBUSY);
> > +        } else {
> > +            to_result(ret)?;
> > +        }
> > +
> > +        Ok(WwMutexGuard::new(ww_mutex))
> > +    }
> > +}
> > +
> > +#[pinned_drop]
> > +impl PinnedDrop for WwAcquireCtx<'_> {
> > +    fn drop(self: Pin<&mut Self>) {
> > +        // SAFETY: The context is being dropped and is pinned.
> > +        unsafe { bindings::ww_acquire_fini(self.inner.get()) };
> > +    }
> > +}
> > +
> > +/// A wound/wait mutex backed with C side `ww_mutex`.
> > +///
> > +/// This is a mutual exclusion primitive that provides deadlock
> > avoidance when +/// acquiring multiple locks of the same class.
> 
> A link would be cool for the docs.
> 
> > +///
> > +/// # Examples
> > +///
> > +/// ## Basic Usage
> > +///
> > +/// ```
> > +/// use kernel::c_str;
> > +/// use kernel::sync::Arc;
> > +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> > WwMutex }; +/// use pin_init::stack_pin_init;
> > +///
> > +/// stack_pin_init!(let class =
> > WwClass::new_wound_wait(c_str!("buffer_class"))); +/// let mutex =
> > Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?; +///
> > +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class),
> > GFP_KERNEL)?; +///
> > +/// let guard = ctx.lock(&mutex)?;
> > +/// assert_eq!(*guard, 42);
> > +///
> > +/// # Ok::<(), Error>(())
> > +/// ```
> > +///
> > +/// ## Multiple Locks
> > +///
> > +/// ```
> > +/// use kernel::c_str;
> > +/// use kernel::prelude::*;
> > +/// use kernel::sync::Arc;
> > +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> > WwMutex}; +/// use pin_init::stack_pin_init;
> > +///
> > +/// stack_pin_init!(let class =
> > WwClass::new_wait_die(c_str!("resource_class"))); +/// let mutex_a
> > = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
> > +/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B",
> > &class), GFP_KERNEL)?; +/// +/// let ctx =
> > KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?; +///
> > +/// // Try to acquire both locks.
> > +/// let guard_a = match ctx.lock(&mutex_a) {
> > +///     Ok(guard) => guard,
> > +///     Err(e) if e == EDEADLK => {
> > +///         // Deadlock detected, use slow path.
> 
> You must release all other locks before calling this, except there
> aren’t any taken in your example.
> 

True, thanks!

> You should perhaps add a release_all() function to the context.
> 

Do you mean we should track guards in the `WwAcquireCtx` wrapper? That
would mostly remove the need for `ExecContext`, which is a good idea
IMO.

> > +///         ctx.lock_slow(&mutex_a)?
> > +///     }
> > +///     Err(e) => return Err(e),
> > +/// };
> > +///
> > +/// let guard_b = ctx.lock(&mutex_b)?;
> > +/// ctx.done();
> > +///
> > +/// # Ok::<(), Error>(())
> > +/// ```
> > +#[pin_data]
> > +pub struct WwMutex<'a, T: ?Sized> {
> > +    _p: PhantomData<&'a WwClass>,
> 
> Make the PhantomData last, please.
> 
> > +    #[pin]
> > +    mutex: Opaque<bindings::ww_mutex>,
> > +    data: UnsafeCell<T>,
> > +}
> > +
> > +// SAFETY: [`WwMutex`] can be shared between threads.
> > +unsafe impl<T: ?Sized + Send> Send for WwMutex<'_, T> {}
> 
> “Send” does not share anything. When you send something, some other
> thread has it, and you don’t have it anymore.
> 
> Blank here.
> 
> > +// SAFETY: [`WwMutex`] can be safely accessed from multiple
> > threads concurrently. +unsafe impl<T: ?Sized + Send + Sync> Sync
> > for WwMutex<'_, T> {} +
> > +impl<'ww_class, T> WwMutex<'ww_class, T> {
> > +    /// Creates `Self` with calling `ww_mutex_init` inside.
> 
> ^ This does not parse very well.
> 
> > +    pub fn new(t: T, ww_class: &'ww_class WwClass) -> impl
> > PinInit<Self> {
> 
> Please rename “t” to “data”.
> 
> > +        let class = ww_class.inner.get();
> > +        pin_init!(WwMutex {
> > +            mutex <- Opaque::ffi_init(|slot: *mut
> > bindings::ww_mutex| {
> > +                // SAFETY: `ww_class` is valid for the lifetime
> > `'ww_class` captured by `Self`.
> > +                unsafe { bindings::ww_mutex_init(slot, class) }
> > +            }),
> > +            data: UnsafeCell::new(t),
> > +            _p: PhantomData,
> > +        })
> > +    }
> > +}
> > +
> > +impl<T: ?Sized> WwMutex<'_, T> {
> 
> I wonder why we need this ?Sized here?
> 

If I recall correctly, it was required by the `self.mutex.get()` call.
I will re-check while working on the next version.

> > +    /// Returns a raw pointer to the inner mutex.
> > +    fn as_ptr(&self) -> *mut bindings::ww_mutex {
> > +        self.mutex.get()
> > +    }
> > +
> > +    /// Checks if the mutex is currently locked.
> > +    ///
> > +    /// Intended for internal tests only and should not be used
> > +    /// anywhere else.
> 
> Why?
> 

It was requested by Boqun and Peter here [0].

[0]:
https://lore.kernel.org/rust-for-linux/aFReIdlPPg4MmaYX@tardis.local/

> > +    #[cfg(CONFIG_KUNIT)]
> > +    fn is_locked(&self) -> bool {
> 
> I’d recommend removing this CONFIG_KUNIT and making this pub. You can
> see that there are users for this function in the C code, like for
> example, dma_resv_is_locked().
> 
> > +        // SAFETY: The mutex is pinned and valid.
> > +        unsafe { bindings::ww_mutex_is_locked(self.mutex.get()) }
> > +    }
> > +}
> > +
> > +/// A guard that provides exclusive access to the data protected
> > +/// by a [`WwMutex`].
> > +///
> > +/// # Invariants
> > +///
> > +/// The guard holds an exclusive lock on the associated
> > [`WwMutex`]. The lock is held +/// for the entire lifetime of this
> > guard and is automatically released when the +/// guard is dropped.
> > +#[must_use = "the lock unlocks immediately when the guard is
> > unused"] +pub struct WwMutexGuard<'a, T: ?Sized> {
> > +    mutex: &'a WwMutex<'a, T>,
> > +    _not_send: NotThreadSafe,
> > +}
> > +
> > +// SAFETY: [`WwMutexGuard`] can be shared between threads if the
> > data can. +unsafe impl<T: ?Sized + Sync> Sync for WwMutexGuard<'_,
> > T> {} +
> > +impl<'a, T: ?Sized> WwMutexGuard<'a, T> {
> > +    /// Creates a new guard for a locked mutex.
> > +    fn new(mutex: &'a WwMutex<'a, T>) -> Self {
> > +        Self {
> > +            mutex,
> > +            _not_send: NotThreadSafe,
> > +        }
> > +    }
> > +}
> > +
> > +impl<T: ?Sized> core::ops::Deref for WwMutexGuard<'_, T> {
> > +    type Target = T;
> > +
> > +    fn deref(&self) -> &Self::Target {
> > +        // SAFETY: We hold the lock, so we have exclusive access.
> > +        unsafe { &*self.mutex.data.get() }
> > +    }
> > +}
> > +
> > +impl<T: ?Sized> core::ops::DerefMut for WwMutexGuard<'_, T> {
> > +    fn deref_mut(&mut self) -> &mut Self::Target {
> > +        // SAFETY: We hold the lock, so we have exclusive access.
> > +        unsafe { &mut *self.mutex.data.get() }
> > +    }
> 
> We need to add a bound on Unpin. See [0].
> 
> > +}
> > +
> > +impl<T: ?Sized> Drop for WwMutexGuard<'_, T> {
> > +    fn drop(&mut self) {
> > +        // SAFETY: We hold the lock and are about to release it.
> > +        unsafe { bindings::ww_mutex_unlock(self.mutex.as_ptr()) };
> > +    }
> > +}
> > --
> > 2.50.0
> > 
> > 
> 
> [0]:
> https://lore.kernel.org/rust-for-linux/20250828-lock-t-when-t-is-pinned-v2-1-b067c4b93fd6@collabora.com/
> 
Re: [PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`
Posted by Daniel Almeida 5 months ago
[…]

> 
>> +///
>> +/// # Examples
>> +///
>> +/// ## Basic Usage
>> +///
>> +/// ```
>> +/// use kernel::c_str;
>> +/// use kernel::sync::Arc;
>> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex };
>> +/// use pin_init::stack_pin_init;
>> +///
>> +/// stack_pin_init!(let class = WwClass::new_wound_wait(c_str!("buffer_class")));
>> +/// let mutex = Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?;
>> +///
>> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
>> +///
>> +/// let guard = ctx.lock(&mutex)?;
>> +/// assert_eq!(*guard, 42);
>> +///
>> +/// # Ok::<(), Error>(())
>> +/// ```
>> +///
>> +/// ## Multiple Locks
>> +///
>> +/// ```
>> +/// use kernel::c_str;
>> +/// use kernel::prelude::*;
>> +/// use kernel::sync::Arc;
>> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx, WwMutex};
>> +/// use pin_init::stack_pin_init;
>> +///
>> +/// stack_pin_init!(let class = WwClass::new_wait_die(c_str!("resource_class")));
>> +/// let mutex_a = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
>> +/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B", &class), GFP_KERNEL)?;
>> +///
>> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?;
>> +///
>> +/// // Try to acquire both locks.
>> +/// let guard_a = match ctx.lock(&mutex_a) {
>> +///     Ok(guard) => guard,
>> +///     Err(e) if e == EDEADLK => {
>> +///         // Deadlock detected, use slow path.
> 
> You must release all other locks before calling this, except there aren’t any taken in your example.
> 
> You should perhaps add a release_all() function to the context.
> 

By the way, if we need a context in the first place to lock, it makes sense to
release_all() once this context is dropped.

— Daniel
Re: [PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`
Posted by Onur Özkan 3 months, 2 weeks ago
On Fri, 5 Sep 2025 16:03:02 -0300
Daniel Almeida <daniel.almeida@collabora.com> wrote:

> […]
> 
> > 
> >> +///
> >> +/// # Examples
> >> +///
> >> +/// ## Basic Usage
> >> +///
> >> +/// ```
> >> +/// use kernel::c_str;
> >> +/// use kernel::sync::Arc;
> >> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> >> WwMutex }; +/// use pin_init::stack_pin_init;
> >> +///
> >> +/// stack_pin_init!(let class =
> >> WwClass::new_wound_wait(c_str!("buffer_class"))); +/// let mutex =
> >> Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?; +///
> >> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class),
> >> GFP_KERNEL)?; +///
> >> +/// let guard = ctx.lock(&mutex)?;
> >> +/// assert_eq!(*guard, 42);
> >> +///
> >> +/// # Ok::<(), Error>(())
> >> +/// ```
> >> +///
> >> +/// ## Multiple Locks
> >> +///
> >> +/// ```
> >> +/// use kernel::c_str;
> >> +/// use kernel::prelude::*;
> >> +/// use kernel::sync::Arc;
> >> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> >> WwMutex}; +/// use pin_init::stack_pin_init;
> >> +///
> >> +/// stack_pin_init!(let class =
> >> WwClass::new_wait_die(c_str!("resource_class"))); +/// let mutex_a
> >> = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
> >> +/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B",
> >> &class), GFP_KERNEL)?; +/// +/// let ctx =
> >> KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?; +///
> >> +/// // Try to acquire both locks.
> >> +/// let guard_a = match ctx.lock(&mutex_a) {
> >> +///     Ok(guard) => guard,
> >> +///     Err(e) if e == EDEADLK => {
> >> +///         // Deadlock detected, use slow path.
> > 
> > You must release all other locks before calling this, except there
> > aren’t any taken in your example.
> > 
> > You should perhaps add a release_all() function to the context.
> > 
> 
> By the way, if we need a context in the first place to lock, it makes
> sense to release_all() once this context is dropped.

I am going to need to implement release_all anyway, since the C code [1]
requires that all locks be released before calling ww_acquire_fini
(which we already call in PinnedDrop and now also in reinit, a new
function).

To make that work we need to keep track of the locks, which is what
ExecContext currently does. So I will probably drop ExecContext entirely
and fold that logic into WwAcquireCtx instead (like I mentioned a while
back) for the next version this week.

[1]:
https://github.com/torvalds/linux/blob/552c50713f27/include/linux/ww_mutex.h#L195-L196
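
Roughly what I have in mind (very rough sketch; field and method names
are provisional, and it assumes the bookkeeping can live in something
like a KVec with pop()):

    #[pin_data(PinnedDrop)]
    pub struct WwAcquireCtx<'a> {
        #[pin]
        inner: Opaque<bindings::ww_acquire_ctx>,
        /// Every ww_mutex currently locked through this context.
        locked: KVec<*mut bindings::ww_mutex>,
        _p: PhantomData<&'a WwClass>,
    }

    impl WwAcquireCtx<'_> {
        /// Unlocks everything still held by this context, in reverse
        /// acquisition order.
        pub fn release_all(&mut self) {
            while let Some(mutex) = self.locked.pop() {
                // SAFETY: `mutex` was locked through this context and
                // has not been unlocked since.
                unsafe { bindings::ww_mutex_unlock(mutex) };
            }
        }
    }

The lock methods would then record the mutex (which means either taking
&mut self or using interior mutability), and PinnedDrop would call
release_all() before ww_acquire_fini().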

Regards,
Onur 

> 
> — Daniel
> 
> 
> 
Re: [PATCH v6 3/7] rust: implement `WwMutex`, `WwAcquireCtx` and `WwMutexGuard`
Posted by Onur 5 months ago
On Fri, 5 Sep 2025 16:03:02 -0300
Daniel Almeida <daniel.almeida@collabora.com> wrote:

> […]
> 
> > 
> >> +///
> >> +/// # Examples
> >> +///
> >> +/// ## Basic Usage
> >> +///
> >> +/// ```
> >> +/// use kernel::c_str;
> >> +/// use kernel::sync::Arc;
> >> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> >> WwMutex }; +/// use pin_init::stack_pin_init;
> >> +///
> >> +/// stack_pin_init!(let class =
> >> WwClass::new_wound_wait(c_str!("buffer_class"))); +/// let mutex =
> >> Arc::pin_init(WwMutex::new(42, &class), GFP_KERNEL)?; +///
> >> +/// let ctx = KBox::pin_init(WwAcquireCtx::new(&class),
> >> GFP_KERNEL)?; +///
> >> +/// let guard = ctx.lock(&mutex)?;
> >> +/// assert_eq!(*guard, 42);
> >> +///
> >> +/// # Ok::<(), Error>(())
> >> +/// ```
> >> +///
> >> +/// ## Multiple Locks
> >> +///
> >> +/// ```
> >> +/// use kernel::c_str;
> >> +/// use kernel::prelude::*;
> >> +/// use kernel::sync::Arc;
> >> +/// use kernel::sync::lock::ww_mutex::{WwClass, WwAcquireCtx,
> >> WwMutex}; +/// use pin_init::stack_pin_init;
> >> +///
> >> +/// stack_pin_init!(let class =
> >> WwClass::new_wait_die(c_str!("resource_class"))); +/// let mutex_a
> >> = Arc::pin_init(WwMutex::new("Resource A", &class), GFP_KERNEL)?;
> >> +/// let mutex_b = Arc::pin_init(WwMutex::new("Resource B",
> >> &class), GFP_KERNEL)?; +/// +/// let ctx =
> >> KBox::pin_init(WwAcquireCtx::new(&class), GFP_KERNEL)?; +///
> >> +/// // Try to acquire both locks.
> >> +/// let guard_a = match ctx.lock(&mutex_a) {
> >> +///     Ok(guard) => guard,
> >> +///     Err(e) if e == EDEADLK => {
> >> +///         // Deadlock detected, use slow path.
> > 
> > You must release all other locks before calling this, except there
> > aren’t any taken in your example.
> > 
> > You should perhaps add a release_all() function to the context.
> > 
> 
> By the way, if we need a context in the first place to lock, it makes
> sense to release_all() once this context is dropped.
> 

Very true... Did you like the idea of embedding the core of
`ExecContext` into `WwAcquireCtx`? The `EDEADLK` handling logic would
still be optional, but the context would get a better release mechanism
on drop.

> — Daniel
> 
>