From: Andreas Hindborg
Date: Fri, 06 Feb 2026 22:10:56 +0100
Subject: [PATCH v2 10/11] rust: mm: sheaf: allow use of C initialized static caches
Message-Id: <20260206-xarray-entry-send-v2-10-91c41673fd30@kernel.org>
References: <20260206-xarray-entry-send-v2-0-91c41673fd30@kernel.org>
In-Reply-To: <20260206-xarray-entry-send-v2-0-91c41673fd30@kernel.org>
X-Mailer: b4 0.15-dev
To: Tamir Duberstein, Miguel Ojeda, Alex Gaynor, Boqun Feng, Gary Guo,
    Björn Roy Baron, Benno Lossin, Alice Ryhl, Trevor Gross,
    Danilo Krummrich, Lorenzo Stoakes,
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg , "Matthew Wilcox (Oracle)" X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=21503; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=yDktjoOvxUj5UzZCRZYJztqCsy8J1BqG4VuAbKzhGVM=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBphljqqXLQm4Ui2fm/F6g9svnOtGV2NsNdN5ysM sWS7I1xBnSJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYZY6gAKCRDhuBo+eShj d+0CD/9025QcoAqR2C6buBG07j9NGla+7qRh9BZ6UcaxA0EoY0EyZpG6S1ZK2O+gsFQtzjtmUN/ 5m83jltkZY6db81CW3loYUGG5ew6T319jWJNRAHBjXsvzLNpvbv/dCcZQxgTcthwlb4KCVaW9yu bG3zCr5yBTo1hfYnn8CkZ6nI4p+CXmcJXgjrPuiOPO4f7+MKAYEfufz7aksZingeXS8E+X6KDRz FHGK7RIzc0IX3x1JLvrYPDZcU4QVGUMvaKJxMKkLYDmKh8tFasMZ3iBTkOYZAsgEGFY8QmCIctT 8AIG0iTL5H5XAOVmMUcHloS4P4lrlo0fL+4GlhlUYbPJrHmi3uSdsr27RIZb87a0hyN4IIBjsPc GiN2fJ4tIISN7WLdcoPQJysZq5SKgqhHeHwPg2eKgZY+0D+RiXA5eK32C6wM3payqUCRHkuaB9X 5BpygRvMnwEquQX6ecAABGyL3faxc6v7Au0CY/vjML3Pa6GrIlKThKNkI/EM98rz01fWVVzlBZN AMbG1gMcKsBfkW8u00ZIChWLUhiQs5zChW7Bw4xy3vmQFF0fwRJgta5hBldAyq9o9G2iCk8UJ7I 5Q19TXoYepiJd+owPGFuMqGaECnwcyaWPKiy+vnFSPnZ5KGcN8D6uZ8n0C8zg69p7yUKSZERsWK vRTmCN8xgrfMyrQ== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Extend the sheaf abstraction to support caches initialized by C at kernel boot time, in addition to dynamically created Rust caches. Introduce `KMemCache` as a transparent wrapper around `kmem_cache` for static caches with `'static` lifetime. Rename the previous `KMemCache` to `KMemCacheHandle` to represent dynamically created, reference-counted caches. Add `Static` and `Dynamic` marker types along with `StaticSheaf` and `DynamicSheaf` type aliases to distinguish sheaves from each cache type. The `Sheaf` type now carries lifetime and allocation mode type parameters. Add `SBox::into_ptr()` and `SBox::static_from_ptr()` methods for passing allocations through C code via raw pointers. Add `KMemCache::from_raw()` for wrapping C-initialized static caches and `Sheaf::refill()` for replenishing a sheaf to a minimum size. Export `kmem_cache_prefill_sheaf`, `kmem_cache_return_sheaf`, `kmem_cache_refill_sheaf`, and `kmem_cache_alloc_from_sheaf_noprof` to allow Rust module code to use the sheaf API. Cc: Vlastimil Babka Cc: "Liam R. 
Howlett" Cc: "Matthew Wilcox (Oracle)" Cc: Lorenzo Stoakes Cc: linux-mm@kvack.org Signed-off-by: Andreas Hindborg --- mm/slub.c | 4 + rust/kernel/mm/sheaf.rs | 343 +++++++++++++++++++++++++++++++++++++++++++-= ---- 2 files changed, 317 insertions(+), 30 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index f77b7407c51bc..7c6b1d28778d0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5428,6 +5428,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t = gfp, unsigned int size) =20 return sheaf; } +EXPORT_SYMBOL(kmem_cache_prefill_sheaf); =20 /* * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf() @@ -5483,6 +5484,7 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gf= p_t gfp, barn_put_full_sheaf(barn, sheaf); stat(s, BARN_PUT); } +EXPORT_SYMBOL(kmem_cache_return_sheaf); =20 /* * refill a sheaf previously returned by kmem_cache_prefill_sheaf to at le= ast @@ -5536,6 +5538,7 @@ int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp= _t gfp, *sheafp =3D sheaf; return 0; } +EXPORT_SYMBOL(kmem_cache_refill_sheaf); =20 /* * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf() @@ -5573,6 +5576,7 @@ kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache = *s, gfp_t gfp, =20 return ret; } +EXPORT_SYMBOL(kmem_cache_alloc_from_sheaf_noprof); =20 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf) { diff --git a/rust/kernel/mm/sheaf.rs b/rust/kernel/mm/sheaf.rs index c92750eaf1c4a..a604246714f7b 100644 --- a/rust/kernel/mm/sheaf.rs +++ b/rust/kernel/mm/sheaf.rs @@ -23,17 +23,26 @@ //! //! # Architecture //! -//! The sheaf system consists of three main components: +//! The sheaf system supports two modes of operation: +//! +//! - **Static caches**: [`KMemCache`] represents a cache initialized by C= code at +//! kernel boot time. These have `'static` lifetime and produce [`Static= Sheaf`] +//! instances. +//! - **Dynamic caches**: [`KMemCacheHandle`] wraps a cache created at run= time by +//! Rust code. These are reference-counted and produce [`DynamicSheaf`] = instances. +//! +//! Both modes use the same core types: //! -//! - [`KMemCache`]: A slab cache configured with sheaf support. //! - [`Sheaf`]: A pre-filled container of objects from a specific cache. //! - [`SBox`]: An owned allocation from a sheaf, similar to a `Box`. //! //! # Example //! +//! Using a dynamically created cache: +//! //! ``` //! use kernel::c_str; -//! use kernel::mm::sheaf::{KMemCache, KMemCacheInit, Sheaf, SBox}; +//! use kernel::mm::sheaf::{KMemCacheHandle, KMemCacheInit, Sheaf, SBox}; //! use kernel::prelude::*; //! //! struct MyObject { @@ -47,7 +56,7 @@ //! } //! //! // Create a cache with sheaf capacity of 16 objects. -//! let cache =3D KMemCache::::new(c_str!("my_cache"), 16)?; +//! let cache =3D KMemCacheHandle::::new(c_str!("my_cache"), 16)= ?; //! //! // Pre-fill a sheaf with 8 objects. //! let mut sheaf =3D cache.as_arc_borrow().sheaf(8, GFP_KERNEL)?; @@ -75,7 +84,102 @@ =20 use kernel::prelude::*; =20 -use crate::sync::{Arc, ArcBorrow}; +use crate::{ + sync::{Arc, ArcBorrow}, + types::Opaque, +}; + +/// A slab cache with sheaf support. +/// +/// This type is a transparent wrapper around a kernel `kmem_cache`. It ca= n be +/// used with caches created either by C code or via [`KMemCacheHandle`]. +/// +/// When a reference to this type has `'static` lifetime (i.e., `&'static +/// KMemCache`), it typically represents a cache initialized by C at bo= ot +/// time. Such references produce [`StaticSheaf`] instances via [`sheaf`]. 
+///
+/// [`sheaf`]: KMemCache::sheaf
+///
+/// # Type parameter
+///
+/// - `T`: The type of objects managed by this cache. Must implement
+///   [`KMemCacheInit`] to provide initialization logic for allocations.
+#[repr(transparent)]
+pub struct KMemCache<T: KMemCacheInit<T>> {
+    inner: Opaque<bindings::kmem_cache>,
+    _p: PhantomData<T>,
+}
+
+impl<T: KMemCacheInit<T>> KMemCache<T> {
+    /// Creates a pre-filled sheaf from this cache.
+    ///
+    /// Allocates a sheaf and pre-fills it with `size` objects. Once created,
+    /// allocations from the sheaf via [`Sheaf::alloc`] are guaranteed to
+    /// succeed until the sheaf is depleted.
+    ///
+    /// # Arguments
+    ///
+    /// - `size`: The number of objects to pre-allocate. Must not exceed the
+    ///   cache's `sheaf_capacity`.
+    /// - `gfp`: Allocation flags controlling how memory is obtained. Use
+    ///   [`GFP_KERNEL`] for normal allocations that may sleep, or
+    ///   [`GFP_NOWAIT`] for non-blocking allocations.
+    ///
+    /// # Errors
+    ///
+    /// Returns [`ENOMEM`] if the sheaf or its objects could not be allocated.
+    ///
+    /// # Warnings
+    ///
+    /// The kernel will warn if `size` exceeds `sheaf_capacity`.
+    pub fn sheaf(
+        &'static self,
+        size: usize,
+        gfp: kernel::alloc::Flags,
+    ) -> Result<StaticSheaf<'static, T>> {
+        // SAFETY: `self.as_raw()` returns a valid cache pointer, and `size`
+        // has been validated to fit in a `c_uint`.
+        let ptr = unsafe {
+            bindings::kmem_cache_prefill_sheaf(self.inner.get(), gfp.as_raw(), size.try_into()?)
+        };
+
+        // INVARIANT: `ptr` was returned by `kmem_cache_prefill_sheaf` and is
+        // non-null (checked below). `cache` is the cache from which this sheaf
+        // was created. `dropped` is false since the sheaf has not been returned.
+        Ok(Sheaf {
+            sheaf: NonNull::new(ptr).ok_or(ENOMEM)?,
+            // SAFETY: `self` is a valid reference, so the pointer is non-null.
+            cache: CacheRef::Static(unsafe {
+                NonNull::new_unchecked((&raw const *self).cast_mut())
+            }),
+            dropped: false,
+            _p: PhantomData,
+        })
+    }
+
+    fn as_raw(&self) -> *mut bindings::kmem_cache {
+        self.inner.get()
+    }
+
+    /// Creates a reference to a [`KMemCache`] from a raw pointer.
+    ///
+    /// This is useful for wrapping a C-initialized static `kmem_cache`, such as
+    /// the global `radix_tree_node_cachep` used by XArrays.
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a valid pointer to a `kmem_cache` that was created for
+    ///   objects of type `T`.
+    /// - The cache must remain valid for the lifetime `'a`.
+    /// - The caller must ensure that the cache was configured appropriately for
+    ///   the type `T`, including proper size and alignment.
+    pub unsafe fn from_raw<'a>(ptr: *mut bindings::kmem_cache) -> &'a Self {
+        // SAFETY: The caller guarantees that `ptr` is a valid pointer to a
+        // `kmem_cache` created for objects of type `T`, that it remains valid
+        // for lifetime `'a`, and that the cache is properly configured for `T`.
+        unsafe { &*ptr.cast::<Self>() }
+    }
+}
 
 /// A slab cache with sheaf support.
 ///
@@ -94,12 +198,12 @@
 /// - `cache` is a valid pointer to a `kmem_cache` created with
 ///   `__kmem_cache_create_args`.
 /// - The cache is valid for the lifetime of this struct.
-pub struct KMemCache<T: KMemCacheInit<T>> {
-    cache: NonNull<bindings::kmem_cache>,
-    _p: PhantomData<T>,
+#[repr(transparent)]
+pub struct KMemCacheHandle<T: KMemCacheInit<T>> {
+    cache: NonNull<KMemCache<T>>,
 }
 
-impl<T: KMemCacheInit<T>> KMemCache<T> {
+impl<T: KMemCacheInit<T>> KMemCacheHandle<T> {
     /// Creates a new slab cache with sheaf support.
     ///
     /// Creates a kernel slab cache for objects of type `T` with the specified
@@ -147,8 +251,7 @@ pub fn new(name: &CStr, sheaf_capacity: u32) -> Result<Arc<Self>>
         // `kmem_cache_destroy` is called in `Drop`.
         Ok(Arc::new(
             Self {
-                cache: NonNull::new(ptr).ok_or(ENOMEM)?,
-                _p: PhantomData,
+                cache: NonNull::new(ptr.cast()).ok_or(ENOMEM)?,
             },
             GFP_KERNEL,
         )?)
@@ -175,11 +278,11 @@ pub fn new(name: &CStr, sheaf_capacity: u32) -> Result<Arc<Self>>
     /// # Warnings
     ///
     /// The kernel will warn if `size` exceeds `sheaf_capacity`.
-    pub fn sheaf(
-        self: ArcBorrow<'_, Self>,
+    pub fn sheaf<'a>(
+        self: ArcBorrow<'a, Self>,
         size: usize,
         gfp: kernel::alloc::Flags,
-    ) -> Result<Sheaf<T>> {
+    ) -> Result<DynamicSheaf<'a, T>> {
         // SAFETY: `self.as_raw()` returns a valid cache pointer, and `size`
         // has been validated to fit in a `c_uint`.
         let ptr = unsafe {
@@ -191,17 +294,18 @@ pub fn sheaf(
         // was created. `dropped` is false since the sheaf has not been returned.
         Ok(Sheaf {
             sheaf: NonNull::new(ptr).ok_or(ENOMEM)?,
-            cache: self.into(),
+            cache: CacheRef::Arc(self.into()),
             dropped: false,
+            _p: PhantomData,
         })
     }
 
     fn as_raw(&self) -> *mut bindings::kmem_cache {
-        self.cache.as_ptr()
+        self.cache.as_ptr().cast()
     }
 }
 
-impl<T: KMemCacheInit<T>> Drop for KMemCache<T> {
+impl<T: KMemCacheInit<T>> Drop for KMemCacheHandle<T> {
     fn drop(&mut self) {
         // SAFETY: `self.as_raw()` returns a valid cache pointer that was
         // created by `__kmem_cache_create_args`. As all objects allocated from
@@ -214,13 +318,13 @@ fn drop(&mut self) {
 /// Trait for types that can be initialized in a slab cache.
 ///
 /// This trait provides the initialization logic for objects allocated from a
-/// [`KMemCache`]. When the slab allocator creates new objects, it invokes the
-/// constructor to ensure objects are in a valid initial state.
+/// [`KMemCache`]. The initializer is called when objects are allocated from a
+/// sheaf via [`Sheaf::alloc`].
 ///
 /// # Implementation
 ///
-/// Implementors must provide [`init`](KMemCacheInit::init), which returns
-/// a in-place initializer for the type.
+/// Implementors must provide [`init`](KMemCacheInit::init), which returns an
+/// infallible initializer for the type.
 ///
 /// # Example
 ///
@@ -251,6 +355,28 @@ pub trait KMemCacheInit<T> {
     fn init() -> impl Init<T>;
 }
 
+/// Marker type for sheaves from static caches.
+///
+/// Used as a type parameter for [`Sheaf`] to indicate the sheaf was created
+/// from a `&'static KMemCache`.
+pub enum Static {}
+
+/// Marker type for sheaves from dynamic caches.
+///
+/// Used as a type parameter for [`Sheaf`] to indicate the sheaf was created
+/// from a [`KMemCacheHandle`] via [`ArcBorrow`].
+pub enum Dynamic {}
+
+/// A sheaf from a static cache.
+///
+/// This is a [`Sheaf`] backed by a `&'static KMemCache`.
+pub type StaticSheaf<'a, T> = Sheaf<'a, T, Static>;
+
+/// A sheaf from a dynamic cache.
+///
+/// This is a [`Sheaf`] backed by a reference-counted [`KMemCacheHandle`].
+pub type DynamicSheaf<'a, T> = Sheaf<'a, T, Dynamic>;
+
 /// A pre-filled container of slab objects.
 ///
 /// A sheaf holds a set of pre-allocated objects from a [`KMemCache`].
@@ -261,12 +387,23 @@ pub trait KMemCacheInit<T> {
 /// Sheaves provide faster allocation than direct allocation because they use
 /// local locks with preemption disabled rather than atomic operations.
 ///
+/// # Type parameters
+///
+/// - `'a`: The lifetime of the cache reference.
+/// - `T`: The type of objects in this sheaf.
+/// - `A`: Either [`Static`] or [`Dynamic`], indicating whether the backing
+///   cache is a static reference or a reference-counted handle.
+///
+/// For convenience, [`StaticSheaf`] and [`DynamicSheaf`] type aliases are
+/// provided.
+///
 /// # Lifecycle
 ///
-/// Sheaves are created via [`KMemCache::sheaf`] and should be returned to the
-/// allocator when no longer needed via [`Sheaf::return_refill`]. If a sheaf is
-/// simply dropped, it is returned with `GFP_NOWAIT` flags, which may result in
-/// the sheaf being flushed and freed rather than being cached for reuse.
+/// Sheaves are created via [`KMemCache::sheaf`] or [`KMemCacheHandle::sheaf`]
+/// and should be returned to the allocator when no longer needed via
+/// [`Sheaf::return_refill`]. If a sheaf is simply dropped, it is returned with
+/// `GFP_NOWAIT` flags, which may result in the sheaf being flushed and freed
+/// rather than being cached for reuse.
 ///
 /// # Invariants
 ///
@@ -274,13 +411,14 @@ pub trait KMemCacheInit<T> {
 ///   `kmem_cache_prefill_sheaf`.
 /// - `cache` is the cache from which this sheaf was created.
 /// - `dropped` tracks whether the sheaf has been explicitly returned.
-pub struct Sheaf<T: KMemCacheInit<T>> {
+pub struct Sheaf<'a, T: KMemCacheInit<T>, A> {
     sheaf: NonNull<bindings::slab_sheaf>,
-    cache: Arc<KMemCache<T>>,
+    cache: CacheRef<T>,
     dropped: bool,
+    _p: PhantomData<(&'a KMemCache<T>, A)>,
 }
 
-impl<T: KMemCacheInit<T>> Sheaf<T> {
+impl<'a, T: KMemCacheInit<T>, A> Sheaf<'a, T, A> {
     fn as_raw(&self) -> *mut bindings::slab_sheaf {
         self.sheaf.as_ptr()
     }
@@ -303,6 +441,75 @@ pub fn return_refill(mut self, flags: kernel::alloc::Flags) {
         drop(self);
     }
 
+    /// Refills the sheaf to at least the specified size.
+    ///
+    /// Replenishes the sheaf by preallocating objects until it contains at
+    /// least `size` objects. If the sheaf already contains `size` or more
+    /// objects, this is a no-op. In practice, the sheaf is refilled to its
+    /// full capacity.
+    ///
+    /// # Arguments
+    ///
+    /// - `flags`: Allocation flags controlling how memory is obtained.
+    /// - `size`: The minimum number of objects the sheaf should contain after
+    ///   refilling. If `size` exceeds the cache's `sheaf_capacity`, the sheaf
+    ///   may be replaced with a larger one.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the objects could not be allocated. If refilling
+    /// fails, the existing sheaf is left intact.
+    pub fn refill(&mut self, flags: kernel::alloc::Flags, size: usize) -> Result {
+        // SAFETY: `self.cache.as_raw()` returns a valid cache pointer and
+        // `&raw mut self.sheaf` points to a valid sheaf per the type invariants.
+        kernel::error::to_result(unsafe {
+            bindings::kmem_cache_refill_sheaf(
+                self.cache.as_raw(),
+                flags.as_raw(),
+                (&raw mut (self.sheaf)).cast(),
+                size.try_into()?,
+            )
+        })
+    }
+}
+
+impl<'a, T: KMemCacheInit<T>> Sheaf<'a, T, Static> {
+    /// Allocates an object from the sheaf.
+    ///
+    /// Returns a new [`SBox`] containing an initialized object, or [`None`]
+    /// if the sheaf is depleted. Allocations are guaranteed to succeed as
+    /// long as the sheaf contains pre-allocated objects.
+    ///
+    /// The `gfp` flags passed to `kmem_cache_alloc_from_sheaf` are set to zero,
+    /// meaning no additional flags like `__GFP_ZERO` or `__GFP_ACCOUNT` are
+    /// applied.
+    ///
+    /// The returned `T` is initialized as part of this function.
+    pub fn alloc(&mut self) -> Option<SBox<T>> {
+        // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return valid
+        // pointers. The function returns NULL when the sheaf is empty.
+        let ptr = unsafe {
+            bindings::kmem_cache_alloc_from_sheaf_noprof(self.cache.as_raw(), 0, self.as_raw())
+        };
+
+        // SAFETY:
+        // - `ptr` is a valid pointer as it was just returned by the cache.
+        // - The initializer is infallible, so an error is never returned.
+        unsafe { T::init().__init(ptr.cast()) }.expect("Initializer is infallible");
+
+        let ptr = NonNull::new(ptr.cast::<T>())?;
+
+        // INVARIANT: `ptr` was returned by `kmem_cache_alloc_from_sheaf_noprof`
+        // and initialized above. `cache` is the cache from which this object
+        // was allocated. The object remains valid until freed in `Drop`.
+        Some(SBox {
+            ptr,
+            cache: self.cache.clone(),
+        })
+    }
+}
+
+impl<'a, T: KMemCacheInit<T>> Sheaf<'a, T, Dynamic> {
     /// Allocates an object from the sheaf.
     ///
     /// Returns a new [`SBox`] containing an initialized object, or [`None`]
@@ -338,7 +545,7 @@ pub fn alloc(&mut self) -> Option<SBox<T>> {
     }
 }
 
-impl<T: KMemCacheInit<T>> Drop for Sheaf<T> {
+impl<'a, T: KMemCacheInit<T>, A> Drop for Sheaf<'a, T, A> {
     fn drop(&mut self) {
         if !self.dropped {
             // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return valid
@@ -355,6 +562,39 @@ fn drop(&mut self) {
     }
 }
 
+/// Internal reference to a cache, either static or reference-counted.
+///
+/// # Invariants
+///
+/// - For `CacheRef::Static`: the `NonNull` points to a valid `KMemCache`
+///   with `'static` lifetime, derived from a `&'static KMemCache` reference.
+enum CacheRef<T: KMemCacheInit<T>> {
+    /// A reference-counted handle to a dynamically created cache.
+    Arc(Arc<KMemCacheHandle<T>>),
+    /// A pointer to a static lifetime cache.
+    Static(NonNull<KMemCache<T>>),
+}
+
+impl<T: KMemCacheInit<T>> Clone for CacheRef<T> {
+    fn clone(&self) -> Self {
+        match self {
+            Self::Arc(arg0) => Self::Arc(arg0.clone()),
+            Self::Static(arg0) => Self::Static(*arg0),
+        }
+    }
+}
+
+impl<T: KMemCacheInit<T>> CacheRef<T> {
+    fn as_raw(&self) -> *mut bindings::kmem_cache {
+        match self {
+            CacheRef::Arc(handle) => handle.as_raw(),
+            // SAFETY: By type invariant, `ptr` points to a valid `KMemCache`
+            // with `'static` lifetime.
+            CacheRef::Static(ptr) => unsafe { ptr.as_ref() }.as_raw(),
+        }
+    }
+}
+
 /// An owned allocation from a cache sheaf.
 ///
 /// `SBox` is similar to `Box` but is backed by a slab cache allocation obtained
@@ -371,7 +611,50 @@ fn drop(&mut self) {
 /// - The object remains valid for the lifetime of the `SBox`.
 pub struct SBox<T: KMemCacheInit<T>> {
     ptr: NonNull<T>,
-    cache: Arc<KMemCache<T>>,
+    cache: CacheRef<T>,
+}
+
+impl<T: KMemCacheInit<T>> SBox<T> {
+    /// Consumes the `SBox` and returns the raw pointer to the contained value.
+    ///
+    /// The caller becomes responsible for freeing the memory. The object is not
+    /// dropped and remains initialized. Use [`static_from_ptr`] to reconstruct
+    /// an `SBox` from the pointer.
+    ///
+    /// [`static_from_ptr`]: SBox::static_from_ptr
+    pub fn into_ptr(self) -> *mut T {
+        let ptr = self.ptr.as_ptr();
+        core::mem::forget(self);
+        ptr
+    }
+
+    /// Reconstructs an `SBox` from a raw pointer and cache.
+    ///
+    /// This is intended for use with objects that were previously converted to
+    /// raw pointers via [`into_ptr`], typically for passing through C code.
+    ///
+    /// [`into_ptr`]: SBox::into_ptr
+    ///
+    /// # Safety
+    ///
+    /// - `cache` must be a valid pointer to the `kmem_cache` from which `value`
+    ///   was allocated.
+    /// - `value` must be a valid pointer to an initialized `T` that was
+    ///   allocated from `cache`.
+    /// - The caller must ensure that no other `SBox` or reference exists for
+    ///   `value`.
+    pub unsafe fn static_from_ptr(cache: *mut bindings::kmem_cache, value: *mut T) -> Self {
+        // INVARIANT: The caller guarantees `value` points to a valid,
+        // initialized `T` allocated from `cache`.
+        Self {
+            // SAFETY: By function safety requirements, `value` is not null.
+            ptr: unsafe { NonNull::new_unchecked(value) },
+            cache: CacheRef::Static(
+                // SAFETY: By function safety requirements, `cache` is not null.
+                unsafe { NonNull::new_unchecked(cache.cast()) },
+            ),
+        }
+    }
 }
 
 impl<T: KMemCacheInit<T>> Deref for SBox<T> {
-- 
2.51.2