[PATCH v2 3/3] rust: xarray: add `insert` and `reserve`

Tamir Duberstein posted 3 patches 2 months, 3 weeks ago
[PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Tamir Duberstein 2 months, 3 weeks ago
Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
are akin to `__xa_{alloc,insert}` in C.

Note that unlike `xa_reserve` which only ensures that memory is
allocated, the semantics of `Reservation` are stricter and require
precise management of the reservation. Indices which have been reserved
can still be overwritten with `Guard::store`, which allows for C-like
semantics if desired.

`__xa_cmpxchg_raw` is exported to facilitate the semantics described
above.

Tested-by: Janne Grunau <j@jannau.net>
Reviewed-by: Janne Grunau <j@jannau.net>
Signed-off-by: Tamir Duberstein <tamird@gmail.com>
---
 include/linux/xarray.h |   2 +
 lib/xarray.c           |  28 ++-
 rust/helpers/xarray.c  |   5 +
 rust/kernel/xarray.rs  | 494 +++++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 512 insertions(+), 17 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index be850174e802..64f2a5e06ceb 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -563,6 +563,8 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
+void *__xa_cmpxchg_raw(struct xarray *, unsigned long index, void *old,
+		void *entry, gfp_t);
 int __must_check __xa_insert(struct xarray *, unsigned long index,
 		void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
diff --git a/lib/xarray.c b/lib/xarray.c
index 76dde3a1cacf..58202b6fbb59 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1738,9 +1738,6 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 }
 EXPORT_SYMBOL(xa_store);
 
-static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
-			void *old, void *entry, gfp_t gfp);
-
 /**
  * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
  * @xa: XArray.
@@ -1767,7 +1764,29 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 EXPORT_SYMBOL(__xa_cmpxchg);
 
-static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+/**
+ * __xa_cmpxchg_raw() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * This function is the same as __xa_cmpxchg() except that it does not coerce
+ * XA_ZERO_ENTRY to NULL on egress.
+ *
+ * Context: Any context.  Expects xa_lock to be held on entry.  May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
 			void *old, void *entry, gfp_t gfp)
 {
 	XA_STATE(xas, xa, index);
@@ -1787,6 +1806,7 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
 
 	return xas_result(&xas, curr);
 }
+EXPORT_SYMBOL(__xa_cmpxchg_raw);
 
 /**
  * __xa_insert() - Store this entry in the XArray if no entry is present.
diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c
index 60b299f11451..b6c078e6a343 100644
--- a/rust/helpers/xarray.c
+++ b/rust/helpers/xarray.c
@@ -2,6 +2,11 @@
 
 #include <linux/xarray.h>
 
+void *rust_helper_xa_zero_entry(void)
+{
+	return XA_ZERO_ENTRY;
+}
+
 int rust_helper_xa_err(void *entry)
 {
 	return xa_err(entry);
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
index 101f61c0362d..a43414bb4d7e 100644
--- a/rust/kernel/xarray.rs
+++ b/rust/kernel/xarray.rs
@@ -9,7 +9,12 @@
     prelude::*,
     types::{ForeignOwnable, NotThreadSafe, Opaque},
 };
-use core::{iter, marker::PhantomData, mem, ptr::NonNull};
+use core::{
+    fmt, iter,
+    marker::PhantomData,
+    mem, ops,
+    ptr::{null_mut, NonNull},
+};
 
 /// An array which efficiently maps sparse integer indices to owned objects.
 ///
@@ -25,29 +30,81 @@
 ///
 /// ```rust
 /// # use kernel::alloc::KBox;
-/// # use kernel::xarray::XArray;
+/// # use kernel::xarray::{StoreError, XArray};
 /// # use pin_init::stack_pin_init;
 ///
 /// stack_pin_init!(let xa = XArray::new(Default::default()));
 ///
 /// let dead = KBox::new(0xdead, GFP_KERNEL)?;
 /// let beef = KBox::new(0xbeef, GFP_KERNEL)?;
+/// let leet = KBox::new(0x1337, GFP_KERNEL)?;
+///
+/// let mut guard = xa.lock();
+///
+/// let index = guard.insert_limit(.., dead, GFP_KERNEL)?;
+///
+/// assert_eq!(guard.get(index), Some(&0xdead));
+///
+/// let beef = {
+///     let ret = guard.insert(index, beef, GFP_KERNEL);
+///     assert!(ret.is_err());
+///     let StoreError { value, error } = ret.unwrap_err();
+///     assert_eq!(error, EBUSY);
+///     value
+/// };
+///
+/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
+/// assert!(reservation.is_ok());
+/// let reservation1 = reservation.unwrap();
+/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
+/// assert!(reservation.is_ok());
+/// let reservation2 = reservation.unwrap();
+///
+/// assert_eq!(reservation1.index(), index + 1);
+/// assert_eq!(reservation2.index(), index + 2);
+///
+/// let dead = {
+///     let ret = guard.remove(index);
+///     assert!(ret.is_some());
+///     ret.unwrap()
+/// };
+/// assert_eq!(*dead, 0xdead);
+///
+/// drop(guard); // Reservations can outlive the guard.
+///
+/// let () = reservation1.fill(dead)?;
+///
+/// let index = reservation2.index();
 ///
 /// let mut guard = xa.lock();
 ///
-/// assert_eq!(guard.get(0), None);
+/// let beef = {
+///     let ret = guard.insert(index, beef, GFP_KERNEL);
+///     assert!(ret.is_err());
+///     let StoreError { value, error } = ret.unwrap_err();
+///     assert_eq!(error, EBUSY);
+///     value
+/// };
 ///
-/// assert_eq!(guard.store(0, dead, GFP_KERNEL)?.as_deref(), None);
-/// assert_eq!(guard.get(0).copied(), Some(0xdead));
+/// // `store` ignores reservations.
+/// {
+///    let ret = guard.store(index, beef, GFP_KERNEL);
+///    assert!(ret.is_ok());
+///    assert_eq!(ret.unwrap().as_deref(), None);
+/// }
 ///
-/// *guard.get_mut(0).unwrap() = 0xffff;
-/// assert_eq!(guard.get(0).copied(), Some(0xffff));
+/// assert_eq!(guard.get(index), Some(&0xbeef));
 ///
-/// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some(0xffff));
-/// assert_eq!(guard.get(0).copied(), Some(0xbeef));
+/// // We trampled the reservation above, so filling should fail.
+/// let leet = {
+///    let ret = reservation2.fill_locked(&mut guard, leet);
+///    assert!(ret.is_err());
+///    let StoreError { value, error } = ret.unwrap_err();
+///    assert_eq!(error, EBUSY);
+///    value
+/// };
 ///
-/// guard.remove(0);
-/// assert_eq!(guard.get(0), None);
+/// assert_eq!(guard.get(index), Some(&0xbeef));
 ///
 /// # Ok::<(), Error>(())
 /// ```
@@ -126,6 +183,19 @@ fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
         .map_while(|ptr| NonNull::new(ptr.cast()))
     }
 
+    fn with_guard<F, U>(&self, guard: Option<&mut Guard<'_, T>>, f: F) -> U
+    where
+        F: FnOnce(&mut Guard<'_, T>) -> U,
+    {
+        match guard {
+            None => f(&mut self.lock()),
+            Some(guard) => {
+                assert_eq!(guard.xa.xa.get(), self.xa.get());
+                f(guard)
+            }
+        }
+    }
+
     /// Attempts to lock the [`XArray`] for exclusive access.
     pub fn try_lock(&self) -> Option<Guard<'_, T>> {
         // SAFETY: `self.xa` is always valid by the type invariant.
@@ -172,6 +242,7 @@ fn drop(&mut self) {
 /// The error returned by [`store`](Guard::store).
 ///
 /// Contains the underlying error and the value that was not stored.
+#[derive(Debug)]
 pub struct StoreError<T> {
     /// The error that occurred.
     pub error: Error,
@@ -185,6 +256,11 @@ fn from(value: StoreError<T>) -> Self {
     }
 }
 
+fn to_usize(i: u32) -> usize {
+    i.try_into()
+        .unwrap_or_else(|_| build_error!("cannot convert u32 to usize"))
+}
+
 impl<'a, T: ForeignOwnable> Guard<'a, T> {
     fn load<F, U>(&self, index: usize, f: F) -> Option<U>
     where
@@ -219,7 +295,7 @@ pub fn remove(&mut self, index: usize) -> Option<T> {
         // - The caller holds the lock.
         let ptr = unsafe { bindings::__xa_erase(self.xa.xa.get(), index) }.cast();
         // SAFETY:
-        // - `ptr` is either NULL or came from `T::into_foreign`.
+        // - `ptr` is either `NULL` or came from `T::into_foreign`.
         // - `&mut self` guarantees that the lifetimes of [`T::Borrowed`] and [`T::BorrowedMut`]
         // borrowed from `self` have ended.
         unsafe { T::try_from_foreign(ptr) }
@@ -267,13 +343,272 @@ pub fn store(
             })
         } else {
             let old = old.cast();
-            // SAFETY: `ptr` is either NULL or came from `T::into_foreign`.
+            // SAFETY: `ptr` is either `NULL` or came from `T::into_foreign`.
             //
             // NB: `XA_ZERO_ENTRY` is never returned by functions belonging to the Normal XArray
             // API; such entries present as `NULL`.
             Ok(unsafe { T::try_from_foreign(old) })
         }
     }
+
+    /// Stores an element at the given index if no entry is present.
+    ///
+    /// May drop the lock if needed to allocate memory, and then reacquire it afterwards.
+    ///
+    /// On failure, returns the element which was attempted to be stored.
+    pub fn insert(
+        &mut self,
+        index: usize,
+        value: T,
+        gfp: alloc::Flags,
+    ) -> Result<(), StoreError<T>> {
+        build_assert!(
+            mem::align_of::<T::PointedTo>() >= 4,
+            "pointers stored in XArray must be 4-byte aligned"
+        );
+        let ptr = value.into_foreign();
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` came from `T::into_foreign`.
+        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr.cast(), gfp.as_raw()) } {
+            0 => Ok(()),
+            errno => {
+                // SAFETY: `ptr` came from `T::into_foreign` and `__xa_insert` does not take
+                // ownership of the value on error.
+                let value = unsafe { T::from_foreign(ptr) };
+                Err(StoreError {
+                    value,
+                    error: Error::from_errno(errno),
+                })
+            }
+        }
+    }
+
+    /// Stores an element somewhere in the given range of indices.
+    ///
+    /// On success, takes ownership of `ptr`.
+    ///
+    /// On failure, ownership returns to the caller.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
+    unsafe fn alloc(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        ptr: *mut T::PointedTo,
+        gfp: alloc::Flags,
+    ) -> Result<usize> {
+        // NB: `xa_limit::{max,min}` are inclusive.
+        let limit = bindings::xa_limit {
+            max: match limit.end_bound() {
+                ops::Bound::Included(&end) => end,
+                ops::Bound::Excluded(&end) => end - 1,
+                ops::Bound::Unbounded => u32::MAX,
+            },
+            min: match limit.start_bound() {
+                ops::Bound::Included(&start) => start,
+                ops::Bound::Excluded(&start) => start + 1,
+                ops::Bound::Unbounded => 0,
+            },
+        };
+
+        let mut index = u32::MAX;
+
+        // SAFETY:
+        // - `self.xa` is always valid by the type invariant.
+        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
+        //
+        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
+        match unsafe {
+            bindings::__xa_alloc(
+                self.xa.xa.get(),
+                &mut index,
+                ptr.cast(),
+                limit,
+                gfp.as_raw(),
+            )
+        } {
+            0 => Ok(to_usize(index)),
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    /// Allocates an entry somewhere in the array.
+    ///
+    /// On success, returns the index at which the entry was stored.
+    ///
+    /// On failure, returns the entry which was attempted to be stored.
+    pub fn insert_limit(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        value: T,
+        gfp: alloc::Flags,
+    ) -> Result<usize, StoreError<T>> {
+        build_assert!(
+            mem::align_of::<T::PointedTo>() >= 4,
+            "pointers stored in XArray must be 4-byte aligned"
+        );
+        let ptr = value.into_foreign();
+        // SAFETY: `ptr` came from `T::into_foreign`.
+        unsafe { self.alloc(limit, ptr, gfp) }.map_err(|error| {
+            // SAFETY: `ptr` came from `T::into_foreign` and `self.alloc` does not take ownership of
+            // the value on error.
+            let value = unsafe { T::from_foreign(ptr) };
+            StoreError { value, error }
+        })
+    }
+
+    /// Reserves an entry in the array.
+    pub fn reserve(&mut self, index: usize, gfp: alloc::Flags) -> Result<Reservation<'a, T>> {
+        // NB: `__xa_insert` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
+        let ptr = null_mut();
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` is `NULL`.
+        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr, gfp.as_raw()) } {
+            0 => Ok(Reservation { xa: self.xa, index }),
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    /// Reserves an entry somewhere in the array.
+    pub fn reserve_limit(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        gfp: alloc::Flags,
+    ) -> Result<Reservation<'a, T>> {
+        // NB: `__xa_alloc` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
+        let ptr = null_mut();
+        // SAFETY: `ptr` is `NULL`.
+        unsafe { self.alloc(limit, ptr, gfp) }.map(|index| Reservation { xa: self.xa, index })
+    }
+}
+
+/// A reserved slot in an array.
+///
+/// The slot is released when the reservation goes out of scope.
+///
+/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
+/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
+/// used in context where the array lock is held.
+#[must_use = "the reservation is released immediately when the reservation is unused"]
+pub struct Reservation<'a, T: ForeignOwnable> {
+    xa: &'a XArray<T>,
+    index: usize,
+}
+
+impl<T: ForeignOwnable> fmt::Debug for Reservation<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Reservation")
+            .field("index", &self.index())
+            .finish()
+    }
+}
+
+impl<T: ForeignOwnable> Reservation<'_, T> {
+    /// Returns the index of the reservation.
+    pub fn index(&self) -> usize {
+        self.index
+    }
+
+    /// Replaces the reserved entry with the given entry.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
+    unsafe fn replace(guard: &mut Guard<'_, T>, index: usize, ptr: *mut T::PointedTo) -> Result {
+        // SAFETY: `xa_zero_entry` wraps `XA_ZERO_ENTRY` which is always safe to use.
+        let old = unsafe { bindings::xa_zero_entry() };
+
+        // NB: `__xa_cmpxchg_raw` is used over `__xa_cmpxchg` because the latter coerces
+        // `XA_ZERO_ENTRY` to `NULL` on egress, which would prevent us from determining whether a
+        // replacement was made.
+        //
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign` and `old` is
+        // `XA_ZERO_ENTRY`.
+        let ret =
+            unsafe { bindings::__xa_cmpxchg_raw(guard.xa.xa.get(), index, old, ptr.cast(), 0) };
+
+        // SAFETY: `__xa_cmpxchg_raw` returns the old entry at this index on success or `xa_err` if
+        // an error happened.
+        match unsafe { bindings::xa_err(ret) } {
+            0 => {
+                if ret == old {
+                    Ok(())
+                } else {
+                    Err(EBUSY)
+                }
+            }
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    fn fill_inner(&self, guard: Option<&mut Guard<'_, T>>, value: T) -> Result<(), StoreError<T>> {
+        let Self { xa, index } = self;
+        let index = *index;
+
+        let ptr = value.into_foreign();
+        xa.with_guard(guard, |guard| {
+            // SAFETY: `ptr` came from `T::into_foreign`.
+            unsafe { Self::replace(guard, index, ptr) }
+        })
+        .map_err(|error| {
+            // SAFETY: `ptr` came from `T::into_foreign` and `Self::replace` does not take ownership
+            // of the value on error.
+            let value = unsafe { T::from_foreign(ptr) };
+            StoreError { value, error }
+        })
+    }
+
+    /// Fills the reservation.
+    pub fn fill(self, value: T) -> Result<(), StoreError<T>> {
+        let result = self.fill_inner(None, value);
+        mem::forget(self);
+        result
+    }
+
+    /// Fills the reservation without acquiring the array lock.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the passed guard locks a different array.
+    pub fn fill_locked(self, guard: &mut Guard<'_, T>, value: T) -> Result<(), StoreError<T>> {
+        let result = self.fill_inner(Some(guard), value);
+        mem::forget(self);
+        result
+    }
+
+    fn release_inner(&self, guard: Option<&mut Guard<'_, T>>) -> Result {
+        let Self { xa, index } = self;
+        let index = *index;
+
+        xa.with_guard(guard, |guard| {
+            let ptr = null_mut();
+            // SAFETY: `ptr` is `NULL`.
+            unsafe { Self::replace(guard, index, ptr) }
+        })
+    }
+
+    /// Releases the reservation without acquiring the array lock.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the passed guard locks a different array.
+    pub fn release_locked(self, guard: &mut Guard<'_, T>) -> Result {
+        let result = self.release_inner(Some(guard));
+        mem::forget(self);
+        result
+    }
+}
+
+impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
+    fn drop(&mut self) {
+        // NB: Errors here are possible since `Guard::store` does not honor reservations.
+        let _: Result = self.release_inner(None);
+    }
 }
 
 // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
@@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
 // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
 // `Send`.
 unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
+
+#[macros::kunit_tests(rust_xarray_kunit)]
+mod tests {
+    use super::*;
+    use pin_init::stack_pin_init;
+
+    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
+        KBox::new(value, GFP_KERNEL).map_err(Into::into)
+    }
+
+    #[test]
+    fn test_alloc_kind_alloc() -> Result {
+        test_alloc_kind(AllocKind::Alloc, 0)
+    }
+
+    #[test]
+    fn test_alloc_kind_alloc1() -> Result {
+        test_alloc_kind(AllocKind::Alloc1, 1)
+    }
+
+    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
+        stack_pin_init!(let xa = XArray::new(kind));
+        let mut guard = xa.lock();
+
+        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
+        assert_eq!(reservation.index(), expected_index);
+        reservation.release_locked(&mut guard)?;
+
+        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
+        assert!(insertion.is_ok());
+        let insertion_index = insertion.unwrap();
+        assert_eq!(insertion_index, expected_index);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_insert_and_reserve_interaction() -> Result {
+        const IDX: usize = 0x1337;
+
+        fn insert<T: ForeignOwnable>(
+            guard: &mut Guard<'_, T>,
+            value: T,
+        ) -> Result<(), StoreError<T>> {
+            guard.insert(IDX, value, GFP_KERNEL)
+        }
+
+        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
+            guard.reserve(IDX, GFP_KERNEL)
+        }
+
+        #[track_caller]
+        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
+            // Insertion fails.
+            {
+                let beef = new_kbox(0xbeef)?;
+                let ret = insert(guard, beef);
+                assert!(ret.is_err());
+                let StoreError { error, value } = ret.unwrap_err();
+                assert_eq!(error, EBUSY);
+                assert_eq!(*value, 0xbeef);
+            }
+
+            // Reservation fails.
+            {
+                let ret = reserve(guard);
+                assert!(ret.is_err());
+                assert_eq!(ret.unwrap_err(), EBUSY);
+            }
+
+            Ok(())
+        }
+
+        stack_pin_init!(let xa = XArray::new(Default::default()));
+        let mut guard = xa.lock();
+
+        // Vacant.
+        assert_eq!(guard.get(IDX), None);
+
+        // Reservation succeeds.
+        let reservation = {
+            let ret = reserve(&mut guard);
+            assert!(ret.is_ok());
+            ret.unwrap()
+        };
+
+        // Reserved presents as vacant.
+        assert_eq!(guard.get(IDX), None);
+
+        check_not_vacant(&mut guard)?;
+
+        // Release reservation.
+        {
+            let ret = reservation.release_locked(&mut guard);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        }
+
+        // Vacant again.
+        assert_eq!(guard.get(IDX), None);
+
+        // Insert succeeds.
+        {
+            let dead = new_kbox(0xdead)?;
+            let ret = insert(&mut guard, dead);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        }
+
+        check_not_vacant(&mut guard)?;
+
+        // Remove.
+        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
+
+        // Reserve and fill.
+        {
+            let beef = new_kbox(0xbeef)?;
+            let ret = reserve(&mut guard);
+            assert!(ret.is_ok());
+            let reservation = ret.unwrap();
+            let ret = reservation.fill_locked(&mut guard, beef);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        };
+
+        check_not_vacant(&mut guard)?;
+
+        // Remove.
+        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
+
+        Ok(())
+    }
+}

-- 
2.50.1
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Andreas Hindborg 1 month, 3 weeks ago
"Tamir Duberstein" <tamird@gmail.com> writes:

> Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
> are akin to `__xa_{alloc,insert}` in C.
>
> Note that unlike `xa_reserve` which only ensures that memory is
> allocated, the semantics of `Reservation` are stricter and require
> precise management of the reservation. Indices which have been reserved
> can still be overwritten with `Guard::store`, which allows for C-like
> semantics if desired.
>
> `__xa_cmpxchg_raw` is exported to facilitate the semantics described
> above.
>
> Tested-by: Janne Grunau <j@jannau.net>
> Reviewed-by: Janne Grunau <j@jannau.net>
> Signed-off-by: Tamir Duberstein <tamird@gmail.com>

<cut>

> +    /// Stores an element somewhere in the given range of indices.
> +    ///
> +    /// On success, takes ownership of `ptr`.
> +    ///
> +    /// On failure, ownership returns to the caller.
> +    ///
> +    /// # Safety
> +    ///
> +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> +    unsafe fn alloc(


The naming of this method in C is confusing. Could we call it
insert_limit_raw on the Rust side?

Even though this is private, I think we should also document that the
effect of inserting NULL is to reserve the entry.

> +        &mut self,
> +        limit: impl ops::RangeBounds<u32>,
> +        ptr: *mut T::PointedTo,
> +        gfp: alloc::Flags,
> +    ) -> Result<usize> {
> +        // NB: `xa_limit::{max,min}` are inclusive.
> +        let limit = bindings::xa_limit {
> +            max: match limit.end_bound() {
> +                ops::Bound::Included(&end) => end,
> +                ops::Bound::Excluded(&end) => end - 1,
> +                ops::Bound::Unbounded => u32::MAX,
> +            },
> +            min: match limit.start_bound() {
> +                ops::Bound::Included(&start) => start,
> +                ops::Bound::Excluded(&start) => start + 1,
> +                ops::Bound::Unbounded => 0,
> +            },
> +        };
> +
> +        let mut index = u32::MAX;
> +
> +        // SAFETY:
> +        // - `self.xa` is always valid by the type invariant.
> +        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
> +        //
> +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
> +        match unsafe {
> +            bindings::__xa_alloc(
> +                self.xa.xa.get(),
> +                &mut index,
> +                ptr.cast(),
> +                limit,
> +                gfp.as_raw(),
> +            )
> +        } {
> +            0 => Ok(to_usize(index)),
> +            errno => Err(Error::from_errno(errno)),
> +        }
> +    }
> +
> +    /// Allocates an entry somewhere in the array.

Should we rephrase this to match `alloc`?

  Stores an entry somewhere in the given range of indices.

<cut>

> +impl<T: ForeignOwnable> Reservation<'_, T> {
> +    /// Returns the index of the reservation.
> +    pub fn index(&self) -> usize {
> +        self.index
> +    }
> +
> +    /// Replaces the reserved entry with the given entry.
> +    ///
> +    /// # Safety
> +    ///
> +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.

We should document the effect of replacing with NULL.


Best regards,
Andreas Hindborg
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Tamir Duberstein 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 9:29 AM Andreas Hindborg <a.hindborg@kernel.org> wrote:
>
> "Tamir Duberstein" <tamird@gmail.com> writes:
>
> > Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
> > are akin to `__xa_{alloc,insert}` in C.
> >
> > Note that unlike `xa_reserve` which only ensures that memory is
> > allocated, the semantics of `Reservation` are stricter and require
> > precise management of the reservation. Indices which have been reserved
> > can still be overwritten with `Guard::store`, which allows for C-like
> > semantics if desired.
> >
> > `__xa_cmpxchg_raw` is exported to facilitate the semantics described
> > above.
> >
> > Tested-by: Janne Grunau <j@jannau.net>
> > Reviewed-by: Janne Grunau <j@jannau.net>
> > Signed-off-by: Tamir Duberstein <tamird@gmail.com>
>
> <cut>
>
> > +    /// Stores an element somewhere in the given range of indices.
> > +    ///
> > +    /// On success, takes ownership of `ptr`.
> > +    ///
> > +    /// On failure, ownership returns to the caller.
> > +    ///
> > +    /// # Safety
> > +    ///
> > +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> > +    unsafe fn alloc(
>
>
> The naming of this method in C is confusing. Could we call it
> insert_limit_raw on the Rust side?

I think I prefer to hew close to the C naming. Is there prior art
where Rust names deviate from C names?

> Even though this is private, I think we should also document that the
> effect of inserting NULL is to reserve the entry.

👍

> > +        &mut self,
> > +        limit: impl ops::RangeBounds<u32>,
> > +        ptr: *mut T::PointedTo,
> > +        gfp: alloc::Flags,
> > +    ) -> Result<usize> {
> > +        // NB: `xa_limit::{max,min}` are inclusive.
> > +        let limit = bindings::xa_limit {
> > +            max: match limit.end_bound() {
> > +                ops::Bound::Included(&end) => end,
> > +                ops::Bound::Excluded(&end) => end - 1,
> > +                ops::Bound::Unbounded => u32::MAX,
> > +            },
> > +            min: match limit.start_bound() {
> > +                ops::Bound::Included(&start) => start,
> > +                ops::Bound::Excluded(&start) => start + 1,
> > +                ops::Bound::Unbounded => 0,
> > +            },
> > +        };
> > +
> > +        let mut index = u32::MAX;
> > +
> > +        // SAFETY:
> > +        // - `self.xa` is always valid by the type invariant.
> > +        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
> > +        //
> > +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
> > +        match unsafe {
> > +            bindings::__xa_alloc(
> > +                self.xa.xa.get(),
> > +                &mut index,
> > +                ptr.cast(),
> > +                limit,
> > +                gfp.as_raw(),
> > +            )
> > +        } {
> > +            0 => Ok(to_usize(index)),
> > +            errno => Err(Error::from_errno(errno)),
> > +        }
> > +    }
> > +
> > +    /// Allocates an entry somewhere in the array.
>
> Should we rephrase this to match `alloc`?
>
>   Stores an entry somewhere in the given range of indices.

👍

> <cut>
>
> > +impl<T: ForeignOwnable> Reservation<'_, T> {
> > +    /// Returns the index of the reservation.
> > +    pub fn index(&self) -> usize {
> > +        self.index
> > +    }
> > +
> > +    /// Replaces the reserved entry with the given entry.
> > +    ///
> > +    /// # Safety
> > +    ///
> > +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
>
> We should document the effect of replacing with NULL.

👍

> Best regards,
> Andreas Hindborg
>
>
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Miguel Ojeda 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 3:43 PM Tamir Duberstein <tamird@gmail.com> wrote:
>
> I think I prefer to hew close to the C naming. Is there prior art
> where Rust names deviate from C names?

If it is a name that exists in the standard library and that we have
to use (e.g. for another standard type/trait), then sometimes we pick
that name instead of the kernel one.

For certain things, like constructors, we try to follow the usual Rust
conventions.

Moreover, sometimes there have been arguments about the chance to
improve naming on Rust abstractions vs. the underlying APIs, e.g.
`iget_locked()` vs. `get_or_create_inode()`.

But, generally, we stick to the C names unless there is a good reason.
It depends on not just the code, but also the C side maintainers and
their plans.

Cheers,
Miguel
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Beata Michalska 1 month, 3 weeks ago
Hi Tamir,

Apologies for such a late drop.

On Sun, Jul 13, 2025 at 08:05:49AM -0400, Tamir Duberstein wrote:
> Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
> are akin to `__xa_{alloc,insert}` in C.
> 
> Note that unlike `xa_reserve` which only ensures that memory is
> allocated, the semantics of `Reservation` are stricter and require
> precise management of the reservation. Indices which have been reserved
> can still be overwritten with `Guard::store`, which allows for C-like
> semantics if desired.
> 
> `__xa_cmpxchg_raw` is exported to facilitate the semantics described
> above.
> 
> Tested-by: Janne Grunau <j@jannau.net>
> Reviewed-by: Janne Grunau <j@jannau.net>
> Signed-off-by: Tamir Duberstein <tamird@gmail.com>
> ---
>  include/linux/xarray.h |   2 +
>  lib/xarray.c           |  28 ++-
>  rust/helpers/xarray.c  |   5 +
>  rust/kernel/xarray.rs  | 494 +++++++++++++++++++++++++++++++++++++++++++++++--
>  4 files changed, 512 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/xarray.h b/include/linux/xarray.h
> index be850174e802..64f2a5e06ceb 100644
> --- a/include/linux/xarray.h
> +++ b/include/linux/xarray.h
> @@ -563,6 +563,8 @@ void *__xa_erase(struct xarray *, unsigned long index);
>  void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
>  void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
>  		void *entry, gfp_t);
> +void *__xa_cmpxchg_raw(struct xarray *, unsigned long index, void *old,
> +		void *entry, gfp_t);
>  int __must_check __xa_insert(struct xarray *, unsigned long index,
>  		void *entry, gfp_t);
>  int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
> diff --git a/lib/xarray.c b/lib/xarray.c
> index 76dde3a1cacf..58202b6fbb59 100644
> --- a/lib/xarray.c
> +++ b/lib/xarray.c
> @@ -1738,9 +1738,6 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
>  }
>  EXPORT_SYMBOL(xa_store);
>  
> -static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> -			void *old, void *entry, gfp_t gfp);
> -
>  /**
>   * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
>   * @xa: XArray.
> @@ -1767,7 +1764,29 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
>  }
>  EXPORT_SYMBOL(__xa_cmpxchg);
>  
> -static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> +/**
> + * __xa_cmpxchg_raw() - Conditionally replace an entry in the XArray.
> + * @xa: XArray.
> + * @index: Index into array.
> + * @old: Old value to test against.
> + * @entry: New value to place in array.
> + * @gfp: Memory allocation flags.
> + *
> + * You must already be holding the xa_lock when calling this function.
> + * It will drop the lock if needed to allocate memory, and then reacquire
> + * it afterwards.
> + *
> + * If the entry at @index is the same as @old, replace it with @entry.
> + * If the return value is equal to @old, then the exchange was successful.
> + *
> + * This function is the same as __xa_cmpxchg() except that it does not coerce
> + * XA_ZERO_ENTRY to NULL on egress.
> + *
> + * Context: Any context.  Expects xa_lock to be held on entry.  May
> + * release and reacquire xa_lock if @gfp flags permit.
> + * Return: The old value at this index or xa_err() if an error happened.
> + */
> +void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
>  			void *old, void *entry, gfp_t gfp)
>  {
>  	XA_STATE(xas, xa, index);
> @@ -1787,6 +1806,7 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
>  
>  	return xas_result(&xas, curr);
>  }
> +EXPORT_SYMBOL(__xa_cmpxchg_raw);
>  
>  /**
>   * __xa_insert() - Store this entry in the XArray if no entry is present.
> diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c
> index 60b299f11451..b6c078e6a343 100644
> --- a/rust/helpers/xarray.c
> +++ b/rust/helpers/xarray.c
> @@ -2,6 +2,11 @@
>  
>  #include <linux/xarray.h>
>  
> +void *rust_helper_xa_zero_entry(void)
> +{
> +	return XA_ZERO_ENTRY;
> +}
> +
>  int rust_helper_xa_err(void *entry)
>  {
>  	return xa_err(entry);
> diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
> index 101f61c0362d..a43414bb4d7e 100644
> --- a/rust/kernel/xarray.rs
> +++ b/rust/kernel/xarray.rs
> @@ -9,7 +9,12 @@
>      prelude::*,
>      types::{ForeignOwnable, NotThreadSafe, Opaque},
>  };
> -use core::{iter, marker::PhantomData, mem, ptr::NonNull};
> +use core::{
> +    fmt, iter,
> +    marker::PhantomData,
> +    mem, ops,
> +    ptr::{null_mut, NonNull},
> +};
>  
>  /// An array which efficiently maps sparse integer indices to owned objects.
>  ///
> @@ -25,29 +30,81 @@
>  ///
>  /// ```rust
>  /// # use kernel::alloc::KBox;
> -/// # use kernel::xarray::XArray;
> +/// # use kernel::xarray::{StoreError, XArray};
>  /// # use pin_init::stack_pin_init;
>  ///
>  /// stack_pin_init!(let xa = XArray::new(Default::default()));
>  ///
>  /// let dead = KBox::new(0xdead, GFP_KERNEL)?;
>  /// let beef = KBox::new(0xbeef, GFP_KERNEL)?;
> +/// let leet = KBox::new(0x1337, GFP_KERNEL)?;
> +///
> +/// let mut guard = xa.lock();
> +///
> +/// let index = guard.insert_limit(.., dead, GFP_KERNEL)?;
> +///
> +/// assert_eq!(guard.get(index), Some(&0xdead));
> +///
> +/// let beef = {
> +///     let ret = guard.insert(index, beef, GFP_KERNEL);
> +///     assert!(ret.is_err());
> +///     let StoreError { value, error } = ret.unwrap_err();
> +///     assert_eq!(error, EBUSY);
> +///     value
> +/// };
> +///
> +/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
> +/// assert!(reservation.is_ok());
> +/// let reservation1 = reservation.unwrap();
> +/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
> +/// assert!(reservation.is_ok());
> +/// let reservation2 = reservation.unwrap();
> +///
> +/// assert_eq!(reservation1.index(), index + 1);
> +/// assert_eq!(reservation2.index(), index + 2);
> +///
> +/// let dead = {
> +///     let ret = guard.remove(index);
> +///     assert!(ret.is_some());
> +///     ret.unwrap()
> +/// };
> +/// assert_eq!(*dead, 0xdead);
> +///
> +/// drop(guard); // Reservations can outlive the guard.
> +///
> +/// let () = reservation1.fill(dead)?;
> +///
> +/// let index = reservation2.index();
>  ///
>  /// let mut guard = xa.lock();
>  ///
> -/// assert_eq!(guard.get(0), None);
> +/// let beef = {
> +///     let ret = guard.insert(index, beef, GFP_KERNEL);
> +///     assert!(ret.is_err());
> +///     let StoreError { value, error } = ret.unwrap_err();
> +///     assert_eq!(error, EBUSY);
> +///     value
> +/// };
>  ///
> -/// assert_eq!(guard.store(0, dead, GFP_KERNEL)?.as_deref(), None);
> -/// assert_eq!(guard.get(0).copied(), Some(0xdead));
> +/// // `store` ignores reservations.
> +/// {
> +///    let ret = guard.store(index, beef, GFP_KERNEL);
> +///    assert!(ret.is_ok());
> +///    assert_eq!(ret.unwrap().as_deref(), None);
> +/// }
>  ///
> -/// *guard.get_mut(0).unwrap() = 0xffff;
> -/// assert_eq!(guard.get(0).copied(), Some(0xffff));
> +/// assert_eq!(guard.get(index), Some(&0xbeef));
>  ///
> -/// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some(0xffff));
> -/// assert_eq!(guard.get(0).copied(), Some(0xbeef));
> +/// // We trampled the reservation above, so filling should fail.
> +/// let leet = {
> +///    let ret = reservation2.fill_locked(&mut guard, leet);
> +///    assert!(ret.is_err());
> +///    let StoreError { value, error } = ret.unwrap_err();
> +///    assert_eq!(error, EBUSY);
> +///    value
> +/// };
>  ///
> -/// guard.remove(0);
> -/// assert_eq!(guard.get(0), None);
> +/// assert_eq!(guard.get(index), Some(&0xbeef));
>  ///
>  /// # Ok::<(), Error>(())
>  /// ```
> @@ -126,6 +183,19 @@ fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
>          .map_while(|ptr| NonNull::new(ptr.cast()))
>      }
>  
> +    fn with_guard<F, U>(&self, guard: Option<&mut Guard<'_, T>>, f: F) -> U
> +    where
> +        F: FnOnce(&mut Guard<'_, T>) -> U,
> +    {
> +        match guard {
> +            None => f(&mut self.lock()),
> +            Some(guard) => {
> +                assert_eq!(guard.xa.xa.get(), self.xa.get());
> +                f(guard)
> +            }
> +        }
> +    }
> +
>      /// Attempts to lock the [`XArray`] for exclusive access.
>      pub fn try_lock(&self) -> Option<Guard<'_, T>> {
>          // SAFETY: `self.xa` is always valid by the type invariant.
> @@ -172,6 +242,7 @@ fn drop(&mut self) {
>  /// The error returned by [`store`](Guard::store).
>  ///
>  /// Contains the underlying error and the value that was not stored.
> +#[derive(Debug)]
>  pub struct StoreError<T> {
>      /// The error that occurred.
>      pub error: Error,
> @@ -185,6 +256,11 @@ fn from(value: StoreError<T>) -> Self {
>      }
>  }
>  
> +fn to_usize(i: u32) -> usize {
> +    i.try_into()
> +        .unwrap_or_else(|_| build_error!("cannot convert u32 to usize"))
> +}
> +
>  impl<'a, T: ForeignOwnable> Guard<'a, T> {
>      fn load<F, U>(&self, index: usize, f: F) -> Option<U>
>      where
> @@ -219,7 +295,7 @@ pub fn remove(&mut self, index: usize) -> Option<T> {
>          // - The caller holds the lock.
>          let ptr = unsafe { bindings::__xa_erase(self.xa.xa.get(), index) }.cast();
>          // SAFETY:
> -        // - `ptr` is either NULL or came from `T::into_foreign`.
> +        // - `ptr` is either `NULL` or came from `T::into_foreign`.
>          // - `&mut self` guarantees that the lifetimes of [`T::Borrowed`] and [`T::BorrowedMut`]
>          // borrowed from `self` have ended.
>          unsafe { T::try_from_foreign(ptr) }
> @@ -267,13 +343,272 @@ pub fn store(
>              })
>          } else {
>              let old = old.cast();
> -            // SAFETY: `ptr` is either NULL or came from `T::into_foreign`.
> +            // SAFETY: `ptr` is either `NULL` or came from `T::into_foreign`.
>              //
>              // NB: `XA_ZERO_ENTRY` is never returned by functions belonging to the Normal XArray
>              // API; such entries present as `NULL`.
>              Ok(unsafe { T::try_from_foreign(old) })
>          }
>      }
> +
> +    /// Stores an element at the given index if no entry is present.
> +    ///
> +    /// May drop the lock if needed to allocate memory, and then reacquire it afterwards.
> +    ///
> +    /// On failure, returns the element which was attempted to be stored.
> +    pub fn insert(
> +        &mut self,
> +        index: usize,
> +        value: T,
> +        gfp: alloc::Flags,
> +    ) -> Result<(), StoreError<T>> {
> +        build_assert!(
> +            mem::align_of::<T::PointedTo>() >= 4,
> +            "pointers stored in XArray must be 4-byte aligned"
> +        );
> +        let ptr = value.into_foreign();
> +        // SAFETY: `self.xa` is always valid by the type invariant.
> +        //
> +        // INVARIANT: `ptr` came from `T::into_foreign`.
> +        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr.cast(), gfp.as_raw()) } {
> +            0 => Ok(()),
> +            errno => {
> +                // SAFETY: `ptr` came from `T::into_foreign` and `__xa_insert` does not take
> +                // ownership of the value on error.
> +                let value = unsafe { T::from_foreign(ptr) };
> +                Err(StoreError {
> +                    value,
> +                    error: Error::from_errno(errno),
> +                })
> +            }
> +        }
> +    }
> +
> +    /// Stores an element somewhere in the given range of indices.
> +    ///
> +    /// On success, takes ownership of `ptr`.
> +    ///
> +    /// On failure, ownership returns to the caller.
> +    ///
> +    /// # Safety
> +    ///
> +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> +    unsafe fn alloc(
> +        &mut self,
> +        limit: impl ops::RangeBounds<u32>,
> +        ptr: *mut T::PointedTo,
> +        gfp: alloc::Flags,
> +    ) -> Result<usize> {
> +        // NB: `xa_limit::{max,min}` are inclusive.
> +        let limit = bindings::xa_limit {
> +            max: match limit.end_bound() {
> +                ops::Bound::Included(&end) => end,
> +                ops::Bound::Excluded(&end) => end - 1,
> +                ops::Bound::Unbounded => u32::MAX,
> +            },
> +            min: match limit.start_bound() {
> +                ops::Bound::Included(&start) => start,
> +                ops::Bound::Excluded(&start) => start + 1,
> +                ops::Bound::Unbounded => 0,
> +            },
> +        };
> +
> +        let mut index = u32::MAX;
> +
> +        // SAFETY:
> +        // - `self.xa` is always valid by the type invariant.
> +        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
> +        //
> +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
> +        match unsafe {
> +            bindings::__xa_alloc(
> +                self.xa.xa.get(),
> +                &mut index,
> +                ptr.cast(),
> +                limit,
> +                gfp.as_raw(),
> +            )
> +        } {
> +            0 => Ok(to_usize(index)),
> +            errno => Err(Error::from_errno(errno)),
> +        }
> +    }
> +
> +    /// Allocates an entry somewhere in the array.
> +    ///
> +    /// On success, returns the index at which the entry was stored.
> +    ///
> +    /// On failure, returns the entry which was attempted to be stored.
> +    pub fn insert_limit(
> +        &mut self,
> +        limit: impl ops::RangeBounds<u32>,
> +        value: T,
> +        gfp: alloc::Flags,
> +    ) -> Result<usize, StoreError<T>> {
> +        build_assert!(
> +            mem::align_of::<T::PointedTo>() >= 4,
> +            "pointers stored in XArray must be 4-byte aligned"
> +        );
> +        let ptr = value.into_foreign();
> +        // SAFETY: `ptr` came from `T::into_foreign`.
> +        unsafe { self.alloc(limit, ptr, gfp) }.map_err(|error| {
> +            // SAFETY: `ptr` came from `T::into_foreign` and `self.alloc` does not take ownership of
> +            // the value on error.
> +            let value = unsafe { T::from_foreign(ptr) };
> +            StoreError { value, error }
> +        })
> +    }
> +
> +    /// Reserves an entry in the array.
> +    pub fn reserve(&mut self, index: usize, gfp: alloc::Flags) -> Result<Reservation<'a, T>> {
> +        // NB: `__xa_insert` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
> +        let ptr = null_mut();
> +        // SAFETY: `self.xa` is always valid by the type invariant.
> +        //
> +        // INVARIANT: `ptr` is `NULL`.
> +        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr, gfp.as_raw()) } {
> +            0 => Ok(Reservation { xa: self.xa, index }),
> +            errno => Err(Error::from_errno(errno)),
> +        }
> +    }
> +
> +    /// Reserves an entry somewhere in the array.
> +    pub fn reserve_limit(
> +        &mut self,
> +        limit: impl ops::RangeBounds<u32>,
> +        gfp: alloc::Flags,
> +    ) -> Result<Reservation<'a, T>> {
> +        // NB: `__xa_alloc` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
> +        let ptr = null_mut();
> +        // SAFETY: `ptr` is `NULL`.
> +        unsafe { self.alloc(limit, ptr, gfp) }.map(|index| Reservation { xa: self.xa, index })
> +    }
> +}
> +
> +/// A reserved slot in an array.
> +///
> +/// The slot is released when the reservation goes out of scope.
> +///
> +/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
> +/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
> +/// used in context where the array lock is held.
> +#[must_use = "the reservation is released immediately when the reservation is unused"]
> +pub struct Reservation<'a, T: ForeignOwnable> {
> +    xa: &'a XArray<T>,
> +    index: usize,
> +}
> +
> +impl<T: ForeignOwnable> fmt::Debug for Reservation<'_, T> {
> +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
> +        f.debug_struct("Reservation")
> +            .field("index", &self.index())
> +            .finish()
> +    }
> +}
> +
> +impl<T: ForeignOwnable> Reservation<'_, T> {
> +    /// Returns the index of the reservation.
> +    pub fn index(&self) -> usize {
> +        self.index
> +    }
> +
> +    /// Replaces the reserved entry with the given entry.
> +    ///
> +    /// # Safety
> +    ///
> +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> +    unsafe fn replace(guard: &mut Guard<'_, T>, index: usize, ptr: *mut T::PointedTo) -> Result {
> +        // SAFETY: `xa_zero_entry` wraps `XA_ZERO_ENTRY` which is always safe to use.
> +        let old = unsafe { bindings::xa_zero_entry() };
> +
> +        // NB: `__xa_cmpxchg_raw` is used over `__xa_cmpxchg` because the latter coerces
> +        // `XA_ZERO_ENTRY` to `NULL` on egress, which would prevent us from determining whether a
> +        // replacement was made.
> +        //
> +        // SAFETY: `self.xa` is always valid by the type invariant.
> +        //
> +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign` and `old` is
> +        // `XA_ZERO_ENTRY`.
> +        let ret =
> +            unsafe { bindings::__xa_cmpxchg_raw(guard.xa.xa.get(), index, old, ptr.cast(), 0) };
> +
> +        // SAFETY: `__xa_cmpxchg_raw` returns the old entry at this index on success or `xa_err` if
> +        // an error happened.
> +        match unsafe { bindings::xa_err(ret) } {
> +            0 => {
> +                if ret == old {
> +                    Ok(())
> +                } else {
> +                    Err(EBUSY)
> +                }
> +            }
> +            errno => Err(Error::from_errno(errno)),
> +        }
> +    }
> +
> +    fn fill_inner(&self, guard: Option<&mut Guard<'_, T>>, value: T) -> Result<(), StoreError<T>> {
> +        let Self { xa, index } = self;
> +        let index = *index;
> +
> +        let ptr = value.into_foreign();
> +        xa.with_guard(guard, |guard| {
> +            // SAFETY: `ptr` came from `T::into_foreign`.
> +            unsafe { Self::replace(guard, index, ptr) }
> +        })
> +        .map_err(|error| {
> +            // SAFETY: `ptr` came from `T::into_foreign` and `Self::replace` does not take ownership
> +            // of the value on error.
> +            let value = unsafe { T::from_foreign(ptr) };
> +            StoreError { value, error }
> +        })
> +    }
> +
> +    /// Fills the reservation.
> +    pub fn fill(self, value: T) -> Result<(), StoreError<T>> {
> +        let result = self.fill_inner(None, value);
> +        mem::forget(self);
> +        result
> +    }
> +
> +    /// Fills the reservation without acquiring the array lock.
> +    ///
> +    /// # Panics
> +    ///
> +    /// Panics if the passed guard locks a different array.
> +    pub fn fill_locked(self, guard: &mut Guard<'_, T>, value: T) -> Result<(), StoreError<T>> {
> +        let result = self.fill_inner(Some(guard), value);
> +        mem::forget(self);
> +        result
> +    }
> +
> +    fn release_inner(&self, guard: Option<&mut Guard<'_, T>>) -> Result {
> +        let Self { xa, index } = self;
> +        let index = *index;
> +
> +        xa.with_guard(guard, |guard| {
> +            let ptr = null_mut();
> +            // SAFETY: `ptr` is `NULL`.
> +            unsafe { Self::replace(guard, index, ptr) }
> +        })
> +    }
> +
> +    /// Releases the reservation without acquiring the array lock.
> +    ///
> +    /// # Panics
> +    ///
> +    /// Panics if the passed guard locks a different array.
> +    pub fn release_locked(self, guard: &mut Guard<'_, T>) -> Result {
> +        let result = self.release_inner(Some(guard));
> +        mem::forget(self);
> +        result
> +    }
> +}
> +
> +impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
> +    fn drop(&mut self) {
> +        // NB: Errors here are possible since `Guard::store` does not honor reservations.
> +        let _: Result = self.release_inner(None);
This seems a bit risky as one can drop the reservation while still holding the
lock?
> +    }
>  }
>  
>  // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
> @@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
>  // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
>  // `Send`.
>  unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
> +
> +#[macros::kunit_tests(rust_xarray_kunit)]
> +mod tests {
> +    use super::*;
> +    use pin_init::stack_pin_init;
> +
> +    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
> +        KBox::new(value, GFP_KERNEL).map_err(Into::into)
I believe this should be GFP_ATOMIC as it is being called while holding the xa
lock.

Otherwise:

Tested-By: Beata Michalska <beata.michalska@arm.com>

---
BR
Beata
> +    }
> +
> +    #[test]
> +    fn test_alloc_kind_alloc() -> Result {
> +        test_alloc_kind(AllocKind::Alloc, 0)
> +    }
> +
> +    #[test]
> +    fn test_alloc_kind_alloc1() -> Result {
> +        test_alloc_kind(AllocKind::Alloc1, 1)
> +    }
> +
> +    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
> +        stack_pin_init!(let xa = XArray::new(kind));
> +        let mut guard = xa.lock();
> +
> +        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
> +        assert_eq!(reservation.index(), expected_index);
> +        reservation.release_locked(&mut guard)?;
> +
> +        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
> +        assert!(insertion.is_ok());
> +        let insertion_index = insertion.unwrap();
> +        assert_eq!(insertion_index, expected_index);
> +
> +        Ok(())
> +    }
> +
> +    #[test]
> +    fn test_insert_and_reserve_interaction() -> Result {
> +        const IDX: usize = 0x1337;
> +
> +        fn insert<T: ForeignOwnable>(
> +            guard: &mut Guard<'_, T>,
> +            value: T,
> +        ) -> Result<(), StoreError<T>> {
> +            guard.insert(IDX, value, GFP_KERNEL)
> +        }
> +
> +        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
> +            guard.reserve(IDX, GFP_KERNEL)
> +        }
> +
> +        #[track_caller]
> +        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
> +            // Insertion fails.
> +            {
> +                let beef = new_kbox(0xbeef)?;
> +                let ret = insert(guard, beef);
> +                assert!(ret.is_err());
> +                let StoreError { error, value } = ret.unwrap_err();
> +                assert_eq!(error, EBUSY);
> +                assert_eq!(*value, 0xbeef);
> +            }
> +
> +            // Reservation fails.
> +            {
> +                let ret = reserve(guard);
> +                assert!(ret.is_err());
> +                assert_eq!(ret.unwrap_err(), EBUSY);
> +            }
> +
> +            Ok(())
> +        }
> +
> +        stack_pin_init!(let xa = XArray::new(Default::default()));
> +        let mut guard = xa.lock();
> +
> +        // Vacant.
> +        assert_eq!(guard.get(IDX), None);
> +
> +        // Reservation succeeds.
> +        let reservation = {
> +            let ret = reserve(&mut guard);
> +            assert!(ret.is_ok());
> +            ret.unwrap()
> +        };
> +
> +        // Reserved presents as vacant.
> +        assert_eq!(guard.get(IDX), None);
> +
> +        check_not_vacant(&mut guard)?;
> +
> +        // Release reservation.
> +        {
> +            let ret = reservation.release_locked(&mut guard);
> +            assert!(ret.is_ok());
> +            let () = ret.unwrap();
> +        }
> +
> +        // Vacant again.
> +        assert_eq!(guard.get(IDX), None);
> +
> +        // Insert succeeds.
> +        {
> +            let dead = new_kbox(0xdead)?;
> +            let ret = insert(&mut guard, dead);
> +            assert!(ret.is_ok());
> +            let () = ret.unwrap();
> +        }
> +
> +        check_not_vacant(&mut guard)?;
> +
> +        // Remove.
> +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
> +
> +        // Reserve and fill.
> +        {
> +            let beef = new_kbox(0xbeef)?;
> +            let ret = reserve(&mut guard);
> +            assert!(ret.is_ok());
> +            let reservation = ret.unwrap();
> +            let ret = reservation.fill_locked(&mut guard, beef);
> +            assert!(ret.is_ok());
> +            let () = ret.unwrap();
> +        };
> +
> +        check_not_vacant(&mut guard)?;
> +
> +        // Remove.
> +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
> +
> +        Ok(())
> +    }
> +}
> 
> -- 
> 2.50.1
> 
>
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Tamir Duberstein 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 8:57 AM Beata Michalska <beata.michalska@arm.com> wrote:
>
> Hi Tamir,
>
> Apologies for such a late drop.

Hi Beata, no worries, thanks for your review.

>
> On Sun, Jul 13, 2025 at 08:05:49AM -0400, Tamir Duberstein wrote:
> > Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
> > are akin to `__xa_{alloc,insert}` in C.
> >
> > Note that unlike `xa_reserve` which only ensures that memory is
> > allocated, the semantics of `Reservation` are stricter and require
> > precise management of the reservation. Indices which have been reserved
> > can still be overwritten with `Guard::store`, which allows for C-like
> > semantics if desired.
> >
> > `__xa_cmpxchg_raw` is exported to facilitate the semantics described
> > above.
> >
> > Tested-by: Janne Grunau <j@jannau.net>
> > Reviewed-by: Janne Grunau <j@jannau.net>
> > Signed-off-by: Tamir Duberstein <tamird@gmail.com>
> > ---
> >  include/linux/xarray.h |   2 +
> >  lib/xarray.c           |  28 ++-
> >  rust/helpers/xarray.c  |   5 +
> >  rust/kernel/xarray.rs  | 494 +++++++++++++++++++++++++++++++++++++++++++++++--
> >  4 files changed, 512 insertions(+), 17 deletions(-)
> >
> > diff --git a/include/linux/xarray.h b/include/linux/xarray.h
> > index be850174e802..64f2a5e06ceb 100644
> > --- a/include/linux/xarray.h
> > +++ b/include/linux/xarray.h
> > @@ -563,6 +563,8 @@ void *__xa_erase(struct xarray *, unsigned long index);
> >  void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
> >  void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
> >               void *entry, gfp_t);
> > +void *__xa_cmpxchg_raw(struct xarray *, unsigned long index, void *old,
> > +             void *entry, gfp_t);
> >  int __must_check __xa_insert(struct xarray *, unsigned long index,
> >               void *entry, gfp_t);
> >  int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
> > diff --git a/lib/xarray.c b/lib/xarray.c
> > index 76dde3a1cacf..58202b6fbb59 100644
> > --- a/lib/xarray.c
> > +++ b/lib/xarray.c
> > @@ -1738,9 +1738,6 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
> >  }
> >  EXPORT_SYMBOL(xa_store);
> >
> > -static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> > -                     void *old, void *entry, gfp_t gfp);
> > -
> >  /**
> >   * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
> >   * @xa: XArray.
> > @@ -1767,7 +1764,29 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
> >  }
> >  EXPORT_SYMBOL(__xa_cmpxchg);
> >
> > -static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> > +/**
> > + * __xa_cmpxchg_raw() - Conditionally replace an entry in the XArray.
> > + * @xa: XArray.
> > + * @index: Index into array.
> > + * @old: Old value to test against.
> > + * @entry: New value to place in array.
> > + * @gfp: Memory allocation flags.
> > + *
> > + * You must already be holding the xa_lock when calling this function.
> > + * It will drop the lock if needed to allocate memory, and then reacquire
> > + * it afterwards.
> > + *
> > + * If the entry at @index is the same as @old, replace it with @entry.
> > + * If the return value is equal to @old, then the exchange was successful.
> > + *
> > + * This function is the same as __xa_cmpxchg() except that it does not coerce
> > + * XA_ZERO_ENTRY to NULL on egress.
> > + *
> > + * Context: Any context.  Expects xa_lock to be held on entry.  May
> > + * release and reacquire xa_lock if @gfp flags permit.
> > + * Return: The old value at this index or xa_err() if an error happened.
> > + */
> > +void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> >                       void *old, void *entry, gfp_t gfp)
> >  {
> >       XA_STATE(xas, xa, index);
> > @@ -1787,6 +1806,7 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
> >
> >       return xas_result(&xas, curr);
> >  }
> > +EXPORT_SYMBOL(__xa_cmpxchg_raw);
> >
> >  /**
> >   * __xa_insert() - Store this entry in the XArray if no entry is present.
> > diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c
> > index 60b299f11451..b6c078e6a343 100644
> > --- a/rust/helpers/xarray.c
> > +++ b/rust/helpers/xarray.c
> > @@ -2,6 +2,11 @@
> >
> >  #include <linux/xarray.h>
> >
> > +void *rust_helper_xa_zero_entry(void)
> > +{
> > +     return XA_ZERO_ENTRY;
> > +}
> > +
> >  int rust_helper_xa_err(void *entry)
> >  {
> >       return xa_err(entry);
> > diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
> > index 101f61c0362d..a43414bb4d7e 100644
> > --- a/rust/kernel/xarray.rs
> > +++ b/rust/kernel/xarray.rs
> > @@ -9,7 +9,12 @@
> >      prelude::*,
> >      types::{ForeignOwnable, NotThreadSafe, Opaque},
> >  };
> > -use core::{iter, marker::PhantomData, mem, ptr::NonNull};
> > +use core::{
> > +    fmt, iter,
> > +    marker::PhantomData,
> > +    mem, ops,
> > +    ptr::{null_mut, NonNull},
> > +};
> >
> >  /// An array which efficiently maps sparse integer indices to owned objects.
> >  ///
> > @@ -25,29 +30,81 @@
> >  ///
> >  /// ```rust
> >  /// # use kernel::alloc::KBox;
> > -/// # use kernel::xarray::XArray;
> > +/// # use kernel::xarray::{StoreError, XArray};
> >  /// # use pin_init::stack_pin_init;
> >  ///
> >  /// stack_pin_init!(let xa = XArray::new(Default::default()));
> >  ///
> >  /// let dead = KBox::new(0xdead, GFP_KERNEL)?;
> >  /// let beef = KBox::new(0xbeef, GFP_KERNEL)?;
> > +/// let leet = KBox::new(0x1337, GFP_KERNEL)?;
> > +///
> > +/// let mut guard = xa.lock();
> > +///
> > +/// let index = guard.insert_limit(.., dead, GFP_KERNEL)?;
> > +///
> > +/// assert_eq!(guard.get(index), Some(&0xdead));
> > +///
> > +/// let beef = {
> > +///     let ret = guard.insert(index, beef, GFP_KERNEL);
> > +///     assert!(ret.is_err());
> > +///     let StoreError { value, error } = ret.unwrap_err();
> > +///     assert_eq!(error, EBUSY);
> > +///     value
> > +/// };
> > +///
> > +/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
> > +/// assert!(reservation.is_ok());
> > +/// let reservation1 = reservation.unwrap();
> > +/// let reservation = guard.reserve_limit(.., GFP_KERNEL);
> > +/// assert!(reservation.is_ok());
> > +/// let reservation2 = reservation.unwrap();
> > +///
> > +/// assert_eq!(reservation1.index(), index + 1);
> > +/// assert_eq!(reservation2.index(), index + 2);
> > +///
> > +/// let dead = {
> > +///     let ret = guard.remove(index);
> > +///     assert!(ret.is_some());
> > +///     ret.unwrap()
> > +/// };
> > +/// assert_eq!(*dead, 0xdead);
> > +///
> > +/// drop(guard); // Reservations can outlive the guard.
> > +///
> > +/// let () = reservation1.fill(dead)?;
> > +///
> > +/// let index = reservation2.index();
> >  ///
> >  /// let mut guard = xa.lock();
> >  ///
> > -/// assert_eq!(guard.get(0), None);
> > +/// let beef = {
> > +///     let ret = guard.insert(index, beef, GFP_KERNEL);
> > +///     assert!(ret.is_err());
> > +///     let StoreError { value, error } = ret.unwrap_err();
> > +///     assert_eq!(error, EBUSY);
> > +///     value
> > +/// };
> >  ///
> > -/// assert_eq!(guard.store(0, dead, GFP_KERNEL)?.as_deref(), None);
> > -/// assert_eq!(guard.get(0).copied(), Some(0xdead));
> > +/// // `store` ignores reservations.
> > +/// {
> > +///    let ret = guard.store(index, beef, GFP_KERNEL);
> > +///    assert!(ret.is_ok());
> > +///    assert_eq!(ret.unwrap().as_deref(), None);
> > +/// }
> >  ///
> > -/// *guard.get_mut(0).unwrap() = 0xffff;
> > -/// assert_eq!(guard.get(0).copied(), Some(0xffff));
> > +/// assert_eq!(guard.get(index), Some(&0xbeef));
> >  ///
> > -/// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some(0xffff));
> > -/// assert_eq!(guard.get(0).copied(), Some(0xbeef));
> > +/// // We trampled the reservation above, so filling should fail.
> > +/// let leet = {
> > +///    let ret = reservation2.fill_locked(&mut guard, leet);
> > +///    assert!(ret.is_err());
> > +///    let StoreError { value, error } = ret.unwrap_err();
> > +///    assert_eq!(error, EBUSY);
> > +///    value
> > +/// };
> >  ///
> > -/// guard.remove(0);
> > -/// assert_eq!(guard.get(0), None);
> > +/// assert_eq!(guard.get(index), Some(&0xbeef));
> >  ///
> >  /// # Ok::<(), Error>(())
> >  /// ```
> > @@ -126,6 +183,19 @@ fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
> >          .map_while(|ptr| NonNull::new(ptr.cast()))
> >      }
> >
> > +    fn with_guard<F, U>(&self, guard: Option<&mut Guard<'_, T>>, f: F) -> U
> > +    where
> > +        F: FnOnce(&mut Guard<'_, T>) -> U,
> > +    {
> > +        match guard {
> > +            None => f(&mut self.lock()),
> > +            Some(guard) => {
> > +                assert_eq!(guard.xa.xa.get(), self.xa.get());
> > +                f(guard)
> > +            }
> > +        }
> > +    }
> > +
> >      /// Attempts to lock the [`XArray`] for exclusive access.
> >      pub fn try_lock(&self) -> Option<Guard<'_, T>> {
> >          // SAFETY: `self.xa` is always valid by the type invariant.
> > @@ -172,6 +242,7 @@ fn drop(&mut self) {
> >  /// The error returned by [`store`](Guard::store).
> >  ///
> >  /// Contains the underlying error and the value that was not stored.
> > +#[derive(Debug)]
> >  pub struct StoreError<T> {
> >      /// The error that occurred.
> >      pub error: Error,
> > @@ -185,6 +256,11 @@ fn from(value: StoreError<T>) -> Self {
> >      }
> >  }
> >
> > +fn to_usize(i: u32) -> usize {
> > +    i.try_into()
> > +        .unwrap_or_else(|_| build_error!("cannot convert u32 to usize"))
> > +}
> > +
> >  impl<'a, T: ForeignOwnable> Guard<'a, T> {
> >      fn load<F, U>(&self, index: usize, f: F) -> Option<U>
> >      where
> > @@ -219,7 +295,7 @@ pub fn remove(&mut self, index: usize) -> Option<T> {
> >          // - The caller holds the lock.
> >          let ptr = unsafe { bindings::__xa_erase(self.xa.xa.get(), index) }.cast();
> >          // SAFETY:
> > -        // - `ptr` is either NULL or came from `T::into_foreign`.
> > +        // - `ptr` is either `NULL` or came from `T::into_foreign`.
> >          // - `&mut self` guarantees that the lifetimes of [`T::Borrowed`] and [`T::BorrowedMut`]
> >          // borrowed from `self` have ended.
> >          unsafe { T::try_from_foreign(ptr) }
> > @@ -267,13 +343,272 @@ pub fn store(
> >              })
> >          } else {
> >              let old = old.cast();
> > -            // SAFETY: `ptr` is either NULL or came from `T::into_foreign`.
> > +            // SAFETY: `ptr` is either `NULL` or came from `T::into_foreign`.
> >              //
> >              // NB: `XA_ZERO_ENTRY` is never returned by functions belonging to the Normal XArray
> >              // API; such entries present as `NULL`.
> >              Ok(unsafe { T::try_from_foreign(old) })
> >          }
> >      }
> > +
> > +    /// Stores an element at the given index if no entry is present.
> > +    ///
> > +    /// May drop the lock if needed to allocate memory, and then reacquire it afterwards.
> > +    ///
> > +    /// On failure, returns the element which was attempted to be stored.
> > +    pub fn insert(
> > +        &mut self,
> > +        index: usize,
> > +        value: T,
> > +        gfp: alloc::Flags,
> > +    ) -> Result<(), StoreError<T>> {
> > +        build_assert!(
> > +            mem::align_of::<T::PointedTo>() >= 4,
> > +            "pointers stored in XArray must be 4-byte aligned"
> > +        );
> > +        let ptr = value.into_foreign();
> > +        // SAFETY: `self.xa` is always valid by the type invariant.
> > +        //
> > +        // INVARIANT: `ptr` came from `T::into_foreign`.
> > +        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr.cast(), gfp.as_raw()) } {
> > +            0 => Ok(()),
> > +            errno => {
> > +                // SAFETY: `ptr` came from `T::into_foreign` and `__xa_insert` does not take
> > +                // ownership of the value on error.
> > +                let value = unsafe { T::from_foreign(ptr) };
> > +                Err(StoreError {
> > +                    value,
> > +                    error: Error::from_errno(errno),
> > +                })
> > +            }
> > +        }
> > +    }
> > +
> > +    /// Stores an element somewhere in the given range of indices.
> > +    ///
> > +    /// On success, takes ownership of `ptr`.
> > +    ///
> > +    /// On failure, ownership returns to the caller.
> > +    ///
> > +    /// # Safety
> > +    ///
> > +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> > +    unsafe fn alloc(
> > +        &mut self,
> > +        limit: impl ops::RangeBounds<u32>,
> > +        ptr: *mut T::PointedTo,
> > +        gfp: alloc::Flags,
> > +    ) -> Result<usize> {
> > +        // NB: `xa_limit::{max,min}` are inclusive.
> > +        let limit = bindings::xa_limit {
> > +            max: match limit.end_bound() {
> > +                ops::Bound::Included(&end) => end,
> > +                ops::Bound::Excluded(&end) => end - 1,
> > +                ops::Bound::Unbounded => u32::MAX,
> > +            },
> > +            min: match limit.start_bound() {
> > +                ops::Bound::Included(&start) => start,
> > +                ops::Bound::Excluded(&start) => start + 1,
> > +                ops::Bound::Unbounded => 0,
> > +            },
> > +        };
> > +
> > +        let mut index = u32::MAX;
> > +
> > +        // SAFETY:
> > +        // - `self.xa` is always valid by the type invariant.
> > +        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
> > +        //
> > +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
> > +        match unsafe {
> > +            bindings::__xa_alloc(
> > +                self.xa.xa.get(),
> > +                &mut index,
> > +                ptr.cast(),
> > +                limit,
> > +                gfp.as_raw(),
> > +            )
> > +        } {
> > +            0 => Ok(to_usize(index)),
> > +            errno => Err(Error::from_errno(errno)),
> > +        }
> > +    }
> > +
> > +    /// Allocates an entry somewhere in the array.
> > +    ///
> > +    /// On success, returns the index at which the entry was stored.
> > +    ///
> > +    /// On failure, returns the entry which was attempted to be stored.
> > +    pub fn insert_limit(
> > +        &mut self,
> > +        limit: impl ops::RangeBounds<u32>,
> > +        value: T,
> > +        gfp: alloc::Flags,
> > +    ) -> Result<usize, StoreError<T>> {
> > +        build_assert!(
> > +            mem::align_of::<T::PointedTo>() >= 4,
> > +            "pointers stored in XArray must be 4-byte aligned"
> > +        );
> > +        let ptr = value.into_foreign();
> > +        // SAFETY: `ptr` came from `T::into_foreign`.
> > +        unsafe { self.alloc(limit, ptr, gfp) }.map_err(|error| {
> > +            // SAFETY: `ptr` came from `T::into_foreign` and `self.alloc` does not take ownership of
> > +            // the value on error.
> > +            let value = unsafe { T::from_foreign(ptr) };
> > +            StoreError { value, error }
> > +        })
> > +    }
> > +
> > +    /// Reserves an entry in the array.
> > +    pub fn reserve(&mut self, index: usize, gfp: alloc::Flags) -> Result<Reservation<'a, T>> {
> > +        // NB: `__xa_insert` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
> > +        let ptr = null_mut();
> > +        // SAFETY: `self.xa` is always valid by the type invariant.
> > +        //
> > +        // INVARIANT: `ptr` is `NULL`.
> > +        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr, gfp.as_raw()) } {
> > +            0 => Ok(Reservation { xa: self.xa, index }),
> > +            errno => Err(Error::from_errno(errno)),
> > +        }
> > +    }
> > +
> > +    /// Reserves an entry somewhere in the array.
> > +    pub fn reserve_limit(
> > +        &mut self,
> > +        limit: impl ops::RangeBounds<u32>,
> > +        gfp: alloc::Flags,
> > +    ) -> Result<Reservation<'a, T>> {
> > +        // NB: `__xa_alloc` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
> > +        let ptr = null_mut();
> > +        // SAFETY: `ptr` is `NULL`.
> > +        unsafe { self.alloc(limit, ptr, gfp) }.map(|index| Reservation { xa: self.xa, index })
> > +    }
> > +}
> > +
> > +/// A reserved slot in an array.
> > +///
> > +/// The slot is released when the reservation goes out of scope.
> > +///
> > +/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
> > +/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
> > +/// used in context where the array lock is held.
> > +#[must_use = "the reservation is released immediately when the reservation is unused"]
> > +pub struct Reservation<'a, T: ForeignOwnable> {
> > +    xa: &'a XArray<T>,
> > +    index: usize,
> > +}
> > +
> > +impl<T: ForeignOwnable> fmt::Debug for Reservation<'_, T> {
> > +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
> > +        f.debug_struct("Reservation")
> > +            .field("index", &self.index())
> > +            .finish()
> > +    }
> > +}
> > +
> > +impl<T: ForeignOwnable> Reservation<'_, T> {
> > +    /// Returns the index of the reservation.
> > +    pub fn index(&self) -> usize {
> > +        self.index
> > +    }
> > +
> > +    /// Replaces the reserved entry with the given entry.
> > +    ///
> > +    /// # Safety
> > +    ///
> > +    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
> > +    unsafe fn replace(guard: &mut Guard<'_, T>, index: usize, ptr: *mut T::PointedTo) -> Result {
> > +        // SAFETY: `xa_zero_entry` wraps `XA_ZERO_ENTRY` which is always safe to use.
> > +        let old = unsafe { bindings::xa_zero_entry() };
> > +
> > +        // NB: `__xa_cmpxchg_raw` is used over `__xa_cmpxchg` because the latter coerces
> > +        // `XA_ZERO_ENTRY` to `NULL` on egress, which would prevent us from determining whether a
> > +        // replacement was made.
> > +        //
> > +        // SAFETY: `self.xa` is always valid by the type invariant.
> > +        //
> > +        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign` and `old` is
> > +        // `XA_ZERO_ENTRY`.
> > +        let ret =
> > +            unsafe { bindings::__xa_cmpxchg_raw(guard.xa.xa.get(), index, old, ptr.cast(), 0) };
> > +
> > +        // SAFETY: `__xa_cmpxchg_raw` returns the old entry at this index on success or `xa_err` if
> > +        // an error happened.
> > +        match unsafe { bindings::xa_err(ret) } {
> > +            0 => {
> > +                if ret == old {
> > +                    Ok(())
> > +                } else {
> > +                    Err(EBUSY)
> > +                }
> > +            }
> > +            errno => Err(Error::from_errno(errno)),
> > +        }
> > +    }
> > +
> > +    fn fill_inner(&self, guard: Option<&mut Guard<'_, T>>, value: T) -> Result<(), StoreError<T>> {
> > +        let Self { xa, index } = self;
> > +        let index = *index;
> > +
> > +        let ptr = value.into_foreign();
> > +        xa.with_guard(guard, |guard| {
> > +            // SAFETY: `ptr` came from `T::into_foreign`.
> > +            unsafe { Self::replace(guard, index, ptr) }
> > +        })
> > +        .map_err(|error| {
> > +            // SAFETY: `ptr` came from `T::into_foreign` and `Self::replace` does not take ownership
> > +            // of the value on error.
> > +            let value = unsafe { T::from_foreign(ptr) };
> > +            StoreError { value, error }
> > +        })
> > +    }
> > +
> > +    /// Fills the reservation.
> > +    pub fn fill(self, value: T) -> Result<(), StoreError<T>> {
> > +        let result = self.fill_inner(None, value);
> > +        mem::forget(self);
> > +        result
> > +    }
> > +
> > +    /// Fills the reservation without acquiring the array lock.
> > +    ///
> > +    /// # Panics
> > +    ///
> > +    /// Panics if the passed guard locks a different array.
> > +    pub fn fill_locked(self, guard: &mut Guard<'_, T>, value: T) -> Result<(), StoreError<T>> {
> > +        let result = self.fill_inner(Some(guard), value);
> > +        mem::forget(self);
> > +        result
> > +    }
> > +
> > +    fn release_inner(&self, guard: Option<&mut Guard<'_, T>>) -> Result {
> > +        let Self { xa, index } = self;
> > +        let index = *index;
> > +
> > +        xa.with_guard(guard, |guard| {
> > +            let ptr = null_mut();
> > +            // SAFETY: `ptr` is `NULL`.
> > +            unsafe { Self::replace(guard, index, ptr) }
> > +        })
> > +    }
> > +
> > +    /// Releases the reservation without acquiring the array lock.
> > +    ///
> > +    /// # Panics
> > +    ///
> > +    /// Panics if the passed guard locks a different array.
> > +    pub fn release_locked(self, guard: &mut Guard<'_, T>) -> Result {
> > +        let result = self.release_inner(Some(guard));
> > +        mem::forget(self);
> > +        result
> > +    }
> > +}
> > +
> > +impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
> > +    fn drop(&mut self) {
> > +        // NB: Errors here are possible since `Guard::store` does not honor reservations.
> > +        let _: Result = self.release_inner(None);
> This seems a bit risky as one can drop the reservation while still holding the
> lock?

Yes, that's true. The only way to avoid it would be to make the
reservation borrowed from the guard, but that would exclude usage
patterns where the caller wants to reserve and fulfill in different
critical sections.

Do you have a specific suggestion?

> > +    }
> >  }
> >
> >  // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
> > @@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
> >  // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
> >  // `Send`.
> >  unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
> > +
> > +#[macros::kunit_tests(rust_xarray_kunit)]
> > +mod tests {
> > +    use super::*;
> > +    use pin_init::stack_pin_init;
> > +
> > +    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
> > +        KBox::new(value, GFP_KERNEL).map_err(Into::into)
> I believe this should be GFP_ATOMIC as it is being called while holding the xa
> lock.

I'm not sure what you mean - this function can be called in any
context, and besides: it is test-only code.

>
> Otherwise:
>
> Tested-By: Beata Michalska <beata.michalska@arm.com>

Thanks!
Tamir

>
> ---
> BR
> Beata
> > +    }
> > +
> > +    #[test]
> > +    fn test_alloc_kind_alloc() -> Result {
> > +        test_alloc_kind(AllocKind::Alloc, 0)
> > +    }
> > +
> > +    #[test]
> > +    fn test_alloc_kind_alloc1() -> Result {
> > +        test_alloc_kind(AllocKind::Alloc1, 1)
> > +    }
> > +
> > +    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
> > +        stack_pin_init!(let xa = XArray::new(kind));
> > +        let mut guard = xa.lock();
> > +
> > +        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
> > +        assert_eq!(reservation.index(), expected_index);
> > +        reservation.release_locked(&mut guard)?;
> > +
> > +        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
> > +        assert!(insertion.is_ok());
> > +        let insertion_index = insertion.unwrap();
> > +        assert_eq!(insertion_index, expected_index);
> > +
> > +        Ok(())
> > +    }
> > +
> > +    #[test]
> > +    fn test_insert_and_reserve_interaction() -> Result {
> > +        const IDX: usize = 0x1337;
> > +
> > +        fn insert<T: ForeignOwnable>(
> > +            guard: &mut Guard<'_, T>,
> > +            value: T,
> > +        ) -> Result<(), StoreError<T>> {
> > +            guard.insert(IDX, value, GFP_KERNEL)
> > +        }
> > +
> > +        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
> > +            guard.reserve(IDX, GFP_KERNEL)
> > +        }
> > +
> > +        #[track_caller]
> > +        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
> > +            // Insertion fails.
> > +            {
> > +                let beef = new_kbox(0xbeef)?;
> > +                let ret = insert(guard, beef);
> > +                assert!(ret.is_err());
> > +                let StoreError { error, value } = ret.unwrap_err();
> > +                assert_eq!(error, EBUSY);
> > +                assert_eq!(*value, 0xbeef);
> > +            }
> > +
> > +            // Reservation fails.
> > +            {
> > +                let ret = reserve(guard);
> > +                assert!(ret.is_err());
> > +                assert_eq!(ret.unwrap_err(), EBUSY);
> > +            }
> > +
> > +            Ok(())
> > +        }
> > +
> > +        stack_pin_init!(let xa = XArray::new(Default::default()));
> > +        let mut guard = xa.lock();
> > +
> > +        // Vacant.
> > +        assert_eq!(guard.get(IDX), None);
> > +
> > +        // Reservation succeeds.
> > +        let reservation = {
> > +            let ret = reserve(&mut guard);
> > +            assert!(ret.is_ok());
> > +            ret.unwrap()
> > +        };
> > +
> > +        // Reserved presents as vacant.
> > +        assert_eq!(guard.get(IDX), None);
> > +
> > +        check_not_vacant(&mut guard)?;
> > +
> > +        // Release reservation.
> > +        {
> > +            let ret = reservation.release_locked(&mut guard);
> > +            assert!(ret.is_ok());
> > +            let () = ret.unwrap();
> > +        }
> > +
> > +        // Vacant again.
> > +        assert_eq!(guard.get(IDX), None);
> > +
> > +        // Insert succeeds.
> > +        {
> > +            let dead = new_kbox(0xdead)?;
> > +            let ret = insert(&mut guard, dead);
> > +            assert!(ret.is_ok());
> > +            let () = ret.unwrap();
> > +        }
> > +
> > +        check_not_vacant(&mut guard)?;
> > +
> > +        // Remove.
> > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
> > +
> > +        // Reserve and fill.
> > +        {
> > +            let beef = new_kbox(0xbeef)?;
> > +            let ret = reserve(&mut guard);
> > +            assert!(ret.is_ok());
> > +            let reservation = ret.unwrap();
> > +            let ret = reservation.fill_locked(&mut guard, beef);
> > +            assert!(ret.is_ok());
> > +            let () = ret.unwrap();
> > +        };
> > +
> > +        check_not_vacant(&mut guard)?;
> > +
> > +        // Remove.
> > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
> > +
> > +        Ok(())
> > +    }
> > +}
> >
> > --
> > 2.50.1
> >
> >
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Beata Michalska 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 09:09:56AM -0400, Tamir Duberstein wrote:
> On Mon, Aug 11, 2025 at 8:57 AM Beata Michalska <beata.michalska@arm.com> wrote:
> >
> > Hi Tamir,
> >
> > Apologies for such a late drop.
> 
> Hi Beata, no worries, thanks for your review.
> 
> >
> > On Sun, Jul 13, 2025 at 08:05:49AM -0400, Tamir Duberstein wrote:
[snip] ...
> > > +/// A reserved slot in an array.
> > > +///
> > > +/// The slot is released when the reservation goes out of scope.
> > > +///
> > > +/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
> > > +/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
> > > +/// used in context where the array lock is held.
> > > +#[must_use = "the reservation is released immediately when the reservation is unused"]
> > > +pub struct Reservation<'a, T: ForeignOwnable> {
> > > +    xa: &'a XArray<T>,
> > > +    index: usize,
> > > +}
> > > +
[snip] ...
> > > +
> > > +impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
> > > +    fn drop(&mut self) {
> > > +        // NB: Errors here are possible since `Guard::store` does not honor reservations.
> > > +        let _: Result = self.release_inner(None);
> > This seems bit risky as one can drop the reservation while still holding the
> > lock?
> 
> Yes, that's true. The only way to avoid it would be to make the
> reservation borrowed from the guard, but that would exclude usage
> patterns where the caller wants to reserve and fulfill in different
> critical sections.
> 
> Do you have a specific suggestion?
I guess we could try with locked vs unlocked `Reservation` types, which would
have different Drop implementations, and provide a way to convert locked into
unlocked. Just thinking out loud, so no, nothing specific here.
At the very least we could add `rust_helper_spin_assert_is_held()`?
>
> > > +    }
> > >  }
> > >
> > >  // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
> > > @@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
> > >  // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
> > >  // `Send`.
> > >  unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
> > > +
> > > +#[macros::kunit_tests(rust_xarray_kunit)]
> > > +mod tests {
> > > +    use super::*;
> > > +    use pin_init::stack_pin_init;
> > > +
> > > +    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
> > > +        KBox::new(value, GFP_KERNEL).map_err(Into::into)
> > I believe this should be GFP_ATOMIC as it is being called while holding the xa
> > lock.
> 
> I'm not sure what you mean - this function can be called in any
> context, and besides: it is test-only code.
Actually it cannot: allocations using GFP_KERNEL can sleep so should not be
called from atomic context, which is what is happening in the test cases.

---
BR
Beata
> 
> >
> > Otherwise:
> >
> > Tested-By: Beata Michalska <beata.michalska@arm.com>
> 
> Thanks!
> Tamir
> 
> >
> > ---
> > BR
> > Beata
> > > +    }
> > > +
> > > +    #[test]
> > > +    fn test_alloc_kind_alloc() -> Result {
> > > +        test_alloc_kind(AllocKind::Alloc, 0)
> > > +    }
> > > +
> > > +    #[test]
> > > +    fn test_alloc_kind_alloc1() -> Result {
> > > +        test_alloc_kind(AllocKind::Alloc1, 1)
> > > +    }
> > > +
> > > +    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
> > > +        stack_pin_init!(let xa = XArray::new(kind));
> > > +        let mut guard = xa.lock();
> > > +
> > > +        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
> > > +        assert_eq!(reservation.index(), expected_index);
> > > +        reservation.release_locked(&mut guard)?;
> > > +
> > > +        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
> > > +        assert!(insertion.is_ok());
> > > +        let insertion_index = insertion.unwrap();
> > > +        assert_eq!(insertion_index, expected_index);
> > > +
> > > +        Ok(())
> > > +    }
> > > +
> > > +    #[test]
> > > +    fn test_insert_and_reserve_interaction() -> Result {
> > > +        const IDX: usize = 0x1337;
> > > +
> > > +        fn insert<T: ForeignOwnable>(
> > > +            guard: &mut Guard<'_, T>,
> > > +            value: T,
> > > +        ) -> Result<(), StoreError<T>> {
> > > +            guard.insert(IDX, value, GFP_KERNEL)
> > > +        }
> > > +
> > > +        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
> > > +            guard.reserve(IDX, GFP_KERNEL)
> > > +        }
> > > +
> > > +        #[track_caller]
> > > +        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
> > > +            // Insertion fails.
> > > +            {
> > > +                let beef = new_kbox(0xbeef)?;
> > > +                let ret = insert(guard, beef);
> > > +                assert!(ret.is_err());
> > > +                let StoreError { error, value } = ret.unwrap_err();
> > > +                assert_eq!(error, EBUSY);
> > > +                assert_eq!(*value, 0xbeef);
> > > +            }
> > > +
> > > +            // Reservation fails.
> > > +            {
> > > +                let ret = reserve(guard);
> > > +                assert!(ret.is_err());
> > > +                assert_eq!(ret.unwrap_err(), EBUSY);
> > > +            }
> > > +
> > > +            Ok(())
> > > +        }
> > > +
> > > +        stack_pin_init!(let xa = XArray::new(Default::default()));
> > > +        let mut guard = xa.lock();
> > > +
> > > +        // Vacant.
> > > +        assert_eq!(guard.get(IDX), None);
> > > +
> > > +        // Reservation succeeds.
> > > +        let reservation = {
> > > +            let ret = reserve(&mut guard);
> > > +            assert!(ret.is_ok());
> > > +            ret.unwrap()
> > > +        };
> > > +
> > > +        // Reserved presents as vacant.
> > > +        assert_eq!(guard.get(IDX), None);
> > > +
> > > +        check_not_vacant(&mut guard)?;
> > > +
> > > +        // Release reservation.
> > > +        {
> > > +            let ret = reservation.release_locked(&mut guard);
> > > +            assert!(ret.is_ok());
> > > +            let () = ret.unwrap();
> > > +        }
> > > +
> > > +        // Vacant again.
> > > +        assert_eq!(guard.get(IDX), None);
> > > +
> > > +        // Insert succeeds.
> > > +        {
> > > +            let dead = new_kbox(0xdead)?;
> > > +            let ret = insert(&mut guard, dead);
> > > +            assert!(ret.is_ok());
> > > +            let () = ret.unwrap();
> > > +        }
> > > +
> > > +        check_not_vacant(&mut guard)?;
> > > +
> > > +        // Remove.
> > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
> > > +
> > > +        // Reserve and fill.
> > > +        {
> > > +            let beef = new_kbox(0xbeef)?;
> > > +            let ret = reserve(&mut guard);
> > > +            assert!(ret.is_ok());
> > > +            let reservation = ret.unwrap();
> > > +            let ret = reservation.fill_locked(&mut guard, beef);
> > > +            assert!(ret.is_ok());
> > > +            let () = ret.unwrap();
> > > +        };
> > > +
> > > +        check_not_vacant(&mut guard)?;
> > > +
> > > +        // Remove.
> > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
> > > +
> > > +        Ok(())
> > > +    }
> > > +}
> > >
> > > --
> > > 2.50.1
> > >
> > >
> 
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Tamir Duberstein 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 10:35 AM Beata Michalska
<beata.michalska@arm.com> wrote:
>
> On Mon, Aug 11, 2025 at 09:09:56AM -0400, Tamir Duberstein wrote:
> > On Mon, Aug 11, 2025 at 8:57 AM Beata Michalska <beata.michalska@arm.com> wrote:
> > >
> > > Hi Tamir,
> > >
> > > Apologies for such a late drop.
> >
> > Hi Beata, no worries, thanks for your review.
> >
> > >
> > > On Sun, Jul 13, 2025 at 08:05:49AM -0400, Tamir Duberstein wrote:
> [snip] ...
> > > > +/// A reserved slot in an array.
> > > > +///
> > > > +/// The slot is released when the reservation goes out of scope.
> > > > +///
> > > > +/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
> > > > +/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
> > > > +/// used in context where the array lock is held.
> > > > +#[must_use = "the reservation is released immediately when the reservation is unused"]
> > > > +pub struct Reservation<'a, T: ForeignOwnable> {
> > > > +    xa: &'a XArray<T>,
> > > > +    index: usize,
> > > > +}
> > > > +
> [snip] ...
> > > > +
> > > > +impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
> > > > +    fn drop(&mut self) {
> > > > +        // NB: Errors here are possible since `Guard::store` does not honor reservations.
> > > > +        let _: Result = self.release_inner(None);
> > > This seems bit risky as one can drop the reservation while still holding the
> > > lock?
> >
> > Yes, that's true. The only way to avoid it would be to make the
> > reservation borrowed from the guard, but that would exclude usage
> > patterns where the caller wants to reserve and fulfill in different
> > critical sections.
> >
> > Do you have a specific suggestion?
> I guess we could try with locked vs unlocked `Reservation' types, which would
> have different Drop implementations, and providing a way to convert locked into
> unlocked. Just thinking out loud, so no, nothing specific here.
> At very least we could add 'rust_helper_spin_assert_is_held() ?'

I don't see how having two types of reservations would help.

Can you help me understand how you'd use `rust_helper_spin_assert_is_held` here?

> >
> > > > +    }
> > > >  }
> > > >
> > > >  // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
> > > > @@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
> > > >  // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
> > > >  // `Send`.
> > > >  unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
> > > > +
> > > > +#[macros::kunit_tests(rust_xarray_kunit)]
> > > > +mod tests {
> > > > +    use super::*;
> > > > +    use pin_init::stack_pin_init;
> > > > +
> > > > +    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
> > > > +        KBox::new(value, GFP_KERNEL).map_err(Into::into)
> > > I believe this should be GFP_ATOMIC as it is being called while holding the xa
> > > lock.
> >
> > I'm not sure what you mean - this function can be called in any
> > context, and besides: it is test-only code.
> Actually it cannot: allocations using GFP_KERNEL can sleep so should not be
> called from atomic context, which is what is happening in the test cases.

I see. There are no threads involved in these tests, so I think it is
just fine to sleep with this particular lock held. Can you help me
understand why this is incorrect?

>
> ---
> BR
> Beata
> >
> > >
> > > Otherwise:
> > >
> > > Tested-By: Beata Michalska <beata.michalska@arm.com>
> >
> > Thanks!
> > Tamir
> >
> > >
> > > ---
> > > BR
> > > Beata
> > > > +    }
> > > > +
> > > > +    #[test]
> > > > +    fn test_alloc_kind_alloc() -> Result {
> > > > +        test_alloc_kind(AllocKind::Alloc, 0)
> > > > +    }
> > > > +
> > > > +    #[test]
> > > > +    fn test_alloc_kind_alloc1() -> Result {
> > > > +        test_alloc_kind(AllocKind::Alloc1, 1)
> > > > +    }
> > > > +
> > > > +    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
> > > > +        stack_pin_init!(let xa = XArray::new(kind));
> > > > +        let mut guard = xa.lock();
> > > > +
> > > > +        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
> > > > +        assert_eq!(reservation.index(), expected_index);
> > > > +        reservation.release_locked(&mut guard)?;
> > > > +
> > > > +        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
> > > > +        assert!(insertion.is_ok());
> > > > +        let insertion_index = insertion.unwrap();
> > > > +        assert_eq!(insertion_index, expected_index);
> > > > +
> > > > +        Ok(())
> > > > +    }
> > > > +
> > > > +    #[test]
> > > > +    fn test_insert_and_reserve_interaction() -> Result {
> > > > +        const IDX: usize = 0x1337;
> > > > +
> > > > +        fn insert<T: ForeignOwnable>(
> > > > +            guard: &mut Guard<'_, T>,
> > > > +            value: T,
> > > > +        ) -> Result<(), StoreError<T>> {
> > > > +            guard.insert(IDX, value, GFP_KERNEL)
> > > > +        }
> > > > +
> > > > +        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
> > > > +            guard.reserve(IDX, GFP_KERNEL)
> > > > +        }
> > > > +
> > > > +        #[track_caller]
> > > > +        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
> > > > +            // Insertion fails.
> > > > +            {
> > > > +                let beef = new_kbox(0xbeef)?;
> > > > +                let ret = insert(guard, beef);
> > > > +                assert!(ret.is_err());
> > > > +                let StoreError { error, value } = ret.unwrap_err();
> > > > +                assert_eq!(error, EBUSY);
> > > > +                assert_eq!(*value, 0xbeef);
> > > > +            }
> > > > +
> > > > +            // Reservation fails.
> > > > +            {
> > > > +                let ret = reserve(guard);
> > > > +                assert!(ret.is_err());
> > > > +                assert_eq!(ret.unwrap_err(), EBUSY);
> > > > +            }
> > > > +
> > > > +            Ok(())
> > > > +        }
> > > > +
> > > > +        stack_pin_init!(let xa = XArray::new(Default::default()));
> > > > +        let mut guard = xa.lock();
> > > > +
> > > > +        // Vacant.
> > > > +        assert_eq!(guard.get(IDX), None);
> > > > +
> > > > +        // Reservation succeeds.
> > > > +        let reservation = {
> > > > +            let ret = reserve(&mut guard);
> > > > +            assert!(ret.is_ok());
> > > > +            ret.unwrap()
> > > > +        };
> > > > +
> > > > +        // Reserved presents as vacant.
> > > > +        assert_eq!(guard.get(IDX), None);
> > > > +
> > > > +        check_not_vacant(&mut guard)?;
> > > > +
> > > > +        // Release reservation.
> > > > +        {
> > > > +            let ret = reservation.release_locked(&mut guard);
> > > > +            assert!(ret.is_ok());
> > > > +            let () = ret.unwrap();
> > > > +        }
> > > > +
> > > > +        // Vacant again.
> > > > +        assert_eq!(guard.get(IDX), None);
> > > > +
> > > > +        // Insert succeeds.
> > > > +        {
> > > > +            let dead = new_kbox(0xdead)?;
> > > > +            let ret = insert(&mut guard, dead);
> > > > +            assert!(ret.is_ok());
> > > > +            let () = ret.unwrap();
> > > > +        }
> > > > +
> > > > +        check_not_vacant(&mut guard)?;
> > > > +
> > > > +        // Remove.
> > > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
> > > > +
> > > > +        // Reserve and fill.
> > > > +        {
> > > > +            let beef = new_kbox(0xbeef)?;
> > > > +            let ret = reserve(&mut guard);
> > > > +            assert!(ret.is_ok());
> > > > +            let reservation = ret.unwrap();
> > > > +            let ret = reservation.fill_locked(&mut guard, beef);
> > > > +            assert!(ret.is_ok());
> > > > +            let () = ret.unwrap();
> > > > +        };
> > > > +
> > > > +        check_not_vacant(&mut guard)?;
> > > > +
> > > > +        // Remove.
> > > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
> > > > +
> > > > +        Ok(())
> > > > +    }
> > > > +}
> > > >
> > > > --
> > > > 2.50.1
> > > >
> > > >
> >
Re: [PATCH v2 3/3] rust: xarray: add `insert` and `reserve`
Posted by Beata Michalska 1 month, 3 weeks ago
On Mon, Aug 11, 2025 at 02:02:59PM -0400, Tamir Duberstein wrote:
> On Mon, Aug 11, 2025 at 10:35 AM Beata Michalska
> <beata.michalska@arm.com> wrote:
> >
> > On Mon, Aug 11, 2025 at 09:09:56AM -0400, Tamir Duberstein wrote:
> > > On Mon, Aug 11, 2025 at 8:57 AM Beata Michalska <beata.michalska@arm.com> wrote:
> > > >
> > > > Hi Tamir,
> > > >
> > > > Apologies for such a late drop.
> > >
> > > Hi Beata, no worries, thanks for your review.
> > >
> > > >
> > > > On Sun, Jul 13, 2025 at 08:05:49AM -0400, Tamir Duberstein wrote:
> > [snip] ...
> > > > > +/// A reserved slot in an array.
> > > > > +///
> > > > > +/// The slot is released when the reservation goes out of scope.
> > > > > +///
> > > > > +/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
> > > > > +/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
> > > > > +/// used in context where the array lock is held.
> > > > > +#[must_use = "the reservation is released immediately when the reservation is unused"]
> > > > > +pub struct Reservation<'a, T: ForeignOwnable> {
> > > > > +    xa: &'a XArray<T>,
> > > > > +    index: usize,
> > > > > +}
> > > > > +
> > [snip] ...
> > > > > +
> > > > > +impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
> > > > > +    fn drop(&mut self) {
> > > > > +        // NB: Errors here are possible since `Guard::store` does not honor reservations.
> > > > > +        let _: Result = self.release_inner(None);
> > > > This seems a bit risky as one can drop the reservation while still holding the
> > > > lock?
> > >
> > > Yes, that's true. The only way to avoid it would be to make the
> > > reservation borrowed from the guard, but that would exclude usage
> > > patterns where the caller wants to reserve and fulfill in different
> > > critical sections.
> > >
> > > Do you have a specific suggestion?
> > I guess we could try with locked vs unlocked `Reservation' types, which would
> > have different Drop implementations, and providing a way to convert locked into
> > unlocked. Just thinking out loud, so no, nothing specific here.
> > At the very least we could add 'rust_helper_spin_assert_is_held()'?
> 
> I don't see how having two types of reservations would help.
> 
> Can you help me understand how you'd use `rust_helper_spin_assert_is_held` here?

Probably smth like:

--- a/rust/kernel/xarray.rs
+++ b/rust/kernel/xarray.rs
@@ -188,7 +188,12 @@ fn with_guard<F, U>(&self, guard: Option<&mut Guard<'_, T>>, f: F) -> U
         F: FnOnce(&mut Guard<'_, T>) -> U,
     {
         match guard {
-            None => f(&mut self.lock()),
+            None => {
+                unsafe {
+                    bindings::spin_assert_not_held(&(*self.xa.get()).xa_lock as *const _ as *mut _);
+                }
+                f(&mut self.lock())
+            }
             Some(guard) => {
                 assert_eq!(guard.xa.xa.get(), self.xa.get());
                 f(guard)

but that requires adding the helper for 'lockdep_assert_not_held'.
That said, it is already too late as we will hit deadlock anyway.
I would have to ponder on that one a bit more to come up with smth more sensible.
> 
> > >
> > > > > +    }
> > > > >  }
> > > > >
> > > > >  // SAFETY: `XArray<T>` has no shared mutable state so it is `Send` iff `T` is `Send`.
> > > > > @@ -282,3 +617,136 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
> > > > >  // SAFETY: `XArray<T>` serialises the interior mutability it provides so it is `Sync` iff `T` is
> > > > >  // `Send`.
> > > > >  unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
> > > > > +
> > > > > +#[macros::kunit_tests(rust_xarray_kunit)]
> > > > > +mod tests {
> > > > > +    use super::*;
> > > > > +    use pin_init::stack_pin_init;
> > > > > +
> > > > > +    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
> > > > > +        KBox::new(value, GFP_KERNEL).map_err(Into::into)
> > > > I believe this should be GFP_ATOMIC as it is being called while holding the xa
> > > > lock.
> > >
> > > I'm not sure what you mean - this function can be called in any
> > > context, and besides: it is test-only code.
> > Actually it cannot: allocations using GFP_KERNEL can sleep so should not be
> > called from atomic context, which is what is happening in the test cases.
> 
> I see. There are no threads involved in these tests, so I think it is
> just fine to sleep with this particular lock held. Can you help me
> understand why this is incorrect?
Well, you probably won't see any issues with your tests, but that just seems
like bad practice to start with.
Within the test, this is being called while in atomic context, and this simply
violates the rule of not sleeping while holding a spinlock.
The sleep might be triggered by memory reclaim which might kick in when using
'GFP_KERNEL' flag. Which is why non-sleeping allocation should be used through
'GFP_ATOMIC' or 'GFP_NOWAIT'. So it's either changing the flag or moving the
allocation outside of the atomic section.


---
I will be off next week, so apologies for any delays in replying.

BR
Beata
> 
> >
> > ---
> > BR
> > Beata
> > >
> > > >
> > > > Otherwise:
> > > >
> > > > Tested-By: Beata Michalska <beata.michalska@arm.com>
> > >
> > > Thanks!
> > > Tamir
> > >
> > > >
> > > > ---
> > > > BR
> > > > Beata
> > > > > +    }
> > > > > +
> > > > > +    #[test]
> > > > > +    fn test_alloc_kind_alloc() -> Result {
> > > > > +        test_alloc_kind(AllocKind::Alloc, 0)
> > > > > +    }
> > > > > +
> > > > > +    #[test]
> > > > > +    fn test_alloc_kind_alloc1() -> Result {
> > > > > +        test_alloc_kind(AllocKind::Alloc1, 1)
> > > > > +    }
> > > > > +
> > > > > +    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
> > > > > +        stack_pin_init!(let xa = XArray::new(kind));
> > > > > +        let mut guard = xa.lock();
> > > > > +
> > > > > +        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
> > > > > +        assert_eq!(reservation.index(), expected_index);
> > > > > +        reservation.release_locked(&mut guard)?;
> > > > > +
> > > > > +        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
> > > > > +        assert!(insertion.is_ok());
> > > > > +        let insertion_index = insertion.unwrap();
> > > > > +        assert_eq!(insertion_index, expected_index);
> > > > > +
> > > > > +        Ok(())
> > > > > +    }
> > > > > +
> > > > > +    #[test]
> > > > > +    fn test_insert_and_reserve_interaction() -> Result {
> > > > > +        const IDX: usize = 0x1337;
> > > > > +
> > > > > +        fn insert<T: ForeignOwnable>(
> > > > > +            guard: &mut Guard<'_, T>,
> > > > > +            value: T,
> > > > > +        ) -> Result<(), StoreError<T>> {
> > > > > +            guard.insert(IDX, value, GFP_KERNEL)
> > > > > +        }
> > > > > +
> > > > > +        fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
> > > > > +            guard.reserve(IDX, GFP_KERNEL)
> > > > > +        }
> > > > > +
> > > > > +        #[track_caller]
> > > > > +        fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
> > > > > +            // Insertion fails.
> > > > > +            {
> > > > > +                let beef = new_kbox(0xbeef)?;
> > > > > +                let ret = insert(guard, beef);
> > > > > +                assert!(ret.is_err());
> > > > > +                let StoreError { error, value } = ret.unwrap_err();
> > > > > +                assert_eq!(error, EBUSY);
> > > > > +                assert_eq!(*value, 0xbeef);
> > > > > +            }
> > > > > +
> > > > > +            // Reservation fails.
> > > > > +            {
> > > > > +                let ret = reserve(guard);
> > > > > +                assert!(ret.is_err());
> > > > > +                assert_eq!(ret.unwrap_err(), EBUSY);
> > > > > +            }
> > > > > +
> > > > > +            Ok(())
> > > > > +        }
> > > > > +
> > > > > +        stack_pin_init!(let xa = XArray::new(Default::default()));
> > > > > +        let mut guard = xa.lock();
> > > > > +
> > > > > +        // Vacant.
> > > > > +        assert_eq!(guard.get(IDX), None);
> > > > > +
> > > > > +        // Reservation succeeds.
> > > > > +        let reservation = {
> > > > > +            let ret = reserve(&mut guard);
> > > > > +            assert!(ret.is_ok());
> > > > > +            ret.unwrap()
> > > > > +        };
> > > > > +
> > > > > +        // Reserved presents as vacant.
> > > > > +        assert_eq!(guard.get(IDX), None);
> > > > > +
> > > > > +        check_not_vacant(&mut guard)?;
> > > > > +
> > > > > +        // Release reservation.
> > > > > +        {
> > > > > +            let ret = reservation.release_locked(&mut guard);
> > > > > +            assert!(ret.is_ok());
> > > > > +            let () = ret.unwrap();
> > > > > +        }
> > > > > +
> > > > > +        // Vacant again.
> > > > > +        assert_eq!(guard.get(IDX), None);
> > > > > +
> > > > > +        // Insert succeeds.
> > > > > +        {
> > > > > +            let dead = new_kbox(0xdead)?;
> > > > > +            let ret = insert(&mut guard, dead);
> > > > > +            assert!(ret.is_ok());
> > > > > +            let () = ret.unwrap();
> > > > > +        }
> > > > > +
> > > > > +        check_not_vacant(&mut guard)?;
> > > > > +
> > > > > +        // Remove.
> > > > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
> > > > > +
> > > > > +        // Reserve and fill.
> > > > > +        {
> > > > > +            let beef = new_kbox(0xbeef)?;
> > > > > +            let ret = reserve(&mut guard);
> > > > > +            assert!(ret.is_ok());
> > > > > +            let reservation = ret.unwrap();
> > > > > +            let ret = reservation.fill_locked(&mut guard, beef);
> > > > > +            assert!(ret.is_ok());
> > > > > +            let () = ret.unwrap();
> > > > > +        };
> > > > > +
> > > > > +        check_not_vacant(&mut guard)?;
> > > > > +
> > > > > +        // Remove.
> > > > > +        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
> > > > > +
> > > > > +        Ok(())
> > > > > +    }
> > > > > +}
> > > > >
> > > > > --
> > > > > 2.50.1
> > > > >
> > > > >
> > >