rust/kernel/slab.rs | 52 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+)
This revision adds gen_kmem_cache_allocator, a macro that implements
Allocator::realloc for kmem_cache. The one concern I did have was realloc()
for resizing, since resizing obviously isn't possible for a slab cache.
Signed-off-by: Elijah Wright <git@elijahs.space>
---
rust/kernel/slab.rs | 52 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/rust/kernel/slab.rs b/rust/kernel/slab.rs
index 8b418f9db7cb..3f1310f309c5 100644
--- a/rust/kernel/slab.rs
+++ b/rust/kernel/slab.rs
@@ -83,3 +83,55 @@ fn drop(&mut self) {
unsafe { bindings::kmem_cache_destroy(self.cache.as_ptr()) };
}
}
+
+// SAFETY: The pointer does not change after creation, so `Slab<T>` may
+// be used from multiple threads.
+unsafe impl<T> Send for Slab<T> {}
+unsafe impl<T> Sync for Slab<T> {}
+
+/// Generates a zero-sized allocator type that allocates from a given
+/// `Slab<T>`.
+#[macro_export]
+macro_rules! gen_kmem_cache_allocator {
+ (struct $name:ident for $cache:expr $(,)?) => {
+ #[derive(Clone, Copy, Default)]
+ pub struct $name;
+
+ // SAFETY: Allocation and free happen through kernel APIs which
+ // provide guarantees. The ZST carries no state, so it can be
+ // duplicated freely.
+ unsafe impl $crate::alloc::Allocator for $name {
+ #[inline]
+ unsafe fn realloc(
+ ptr: Option::<::core::ptr::NonNull<u8>>,
+ layout: ::core::alloc::Layout,
+ old_layout: ::core::alloc::Layout,
+ flags: $crate::alloc::Flags,
+ ) -> ::core::result::Result<::core::ptr::NonNull<[u8]>, $crate::alloc::AllocError> {
+ if layout.size() == 0 {
+ if let Some(p) = ptr {
+ // SAFETY: Caller promises `p` came from this allocator.
+ unsafe {
+ $crate::bindings::kmem_cache_free($cache.as_ptr(), p.as_ptr().cast());
+ }
+ }
+ let dang = $crate::alloc::dangling_from_layout(layout);
+ let slice = ::core::ptr::NonNull::slice_from_raw_parts(dang, 0);
+ return Ok(slice);
+ }
+
+ if ptr.is_some() {
+ return Err($crate::alloc::AllocError);
+ }
+
+ let raw_ptr = unsafe {
+ $crate::bindings::kmem_cache_alloc($cache.as_ptr(), flags.as_raw())
+ };
+ let nn = ::core::ptr::NonNull::new(raw_ptr.cast())
+ .ok_or($crate::alloc::AllocError)?;
+ let slice = ::core::ptr::NonNull::slice_from_raw_parts(nn.cast::<u8>(), layout.size());
+ Ok(slice)
+ }
+ }
+ };
+}
\ No newline at end of file
--
2.49.1
On Wed, Oct 1, 2025 at 6:45 AM Elijah Wright <git@elijahs.space> wrote:
>
> this revision adds gen_kmem_cache_allocator, a macro that implements
> Allocator::realloc for kmem_cache. the one concern that I did have was realloc()
> for resizing, since that obviously isn't possible for slab
>
> Signed-off-by: Elijah Wright <git@elijahs.space>
When you send a new version, please don't reply to the previous
version. It's too easy to miss the new version if you do.
> ---
> rust/kernel/slab.rs | 52 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 52 insertions(+)
>
> diff --git a/rust/kernel/slab.rs b/rust/kernel/slab.rs
> index 8b418f9db7cb..3f1310f309c5 100644
> --- a/rust/kernel/slab.rs
> +++ b/rust/kernel/slab.rs
> @@ -83,3 +83,55 @@ fn drop(&mut self) {
> unsafe { bindings::kmem_cache_destroy(self.cache.as_ptr()) };
> }
> }
> +
> +// SAFETY: The pointer does not change after creation, so `Slab<T>` may
> +// be used from multiple threads.
> +unsafe impl<T> Send for Slab<T> {}
> +unsafe impl<T> Sync for Slab<T> {}
> +
> +/// Generates a zero-sized allocator type that allocates from a given
> +/// `Slab<T>`.
> +#[macro_export]
> +macro_rules! gen_kmem_cache_allocator {
> + (struct $name:ident for $cache:expr $(,)?) => {
> + #[derive(Clone, Copy, Default)]
> + pub struct $name;
> +
> + // SAFETY: Allocation and free happen through kernel APIs which
> + // provide guarantees. The ZST carries no state, so it can be
> + // duplicated freely.
> + unsafe impl $crate::alloc::Allocator for $name {
> + #[inline]
> + unsafe fn realloc(
> + ptr: Option::<::core::ptr::NonNull<u8>>,
> + layout: ::core::alloc::Layout,
> + old_layout: ::core::alloc::Layout,
> + flags: $crate::alloc::Flags,
> + ) -> ::core::result::Result<::core::ptr::NonNull<[u8]>, $crate::alloc::AllocError> {
> + if layout.size() == 0 {
> + if let Some(p) = ptr {
> + // SAFETY: Caller promises `p` came from this allocator.
> + unsafe {
> + $crate::bindings::kmem_cache_free($cache.as_ptr(), p.as_ptr().cast());
> + }
> + }
> + let dang = $crate::alloc::dangling_from_layout(layout);
> + let slice = ::core::ptr::NonNull::slice_from_raw_parts(dang, 0);
> + return Ok(slice);
> + }
> +
> + if ptr.is_some() {
> + return Err($crate::alloc::AllocError);
> + }
> +
> + let raw_ptr = unsafe {
> + $crate::bindings::kmem_cache_alloc($cache.as_ptr(), flags.as_raw())
> + };
> + let nn = ::core::ptr::NonNull::new(raw_ptr.cast())
> + .ok_or($crate::alloc::AllocError)?;
> + let slice = ::core::ptr::NonNull::slice_from_raw_parts(nn.cast::<u8>(), layout.size());
> + Ok(slice)
Hm, this is kind of tricky. We specify a size when calling this, but
kmem caches only support a single object size. I don't know what to do
about that.
Alice
© 2016 - 2026 Red Hat, Inc.