Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
allocator, typically used for larger objects, (much) larger than page
size.
All memory allocations made with `Vmalloc` end up in `vrealloc()`.
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
---
rust/helpers.c | 8 ++++++++
rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
rust/kernel/alloc/allocator_test.rs | 1 +
3 files changed, 33 insertions(+)
diff --git a/rust/helpers.c b/rust/helpers.c
index 92d3c03ae1bd..4c628986f0c9 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -33,6 +33,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
}
EXPORT_SYMBOL_GPL(rust_helper_krealloc);
+void * __must_check __realloc_size(2)
+rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
+{
+ return vrealloc(p, size, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
+
/*
* `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
* use it in contexts where Rust expects a `usize` like slice (array) indices.
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 397ae5bcc043..e9a3d0694f41 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -16,6 +16,12 @@
/// `bindings::krealloc`.
pub struct Kmalloc;
+/// The virtually contiguous kernel allocator.
+///
+/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
+/// contiguous kernel virtual space.
+pub struct Vmalloc;
+
/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
fn aligned_size(new_layout: Layout) -> usize {
// Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
@@ -58,6 +64,10 @@ fn krealloc() -> Self {
Self(bindings::krealloc)
}
+ fn vrealloc() -> Self {
+ Self(bindings::vrealloc)
+ }
+
// SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
unsafe fn call(
&self,
@@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
}
}
+unsafe impl Allocator for Vmalloc {
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ let realloc = ReallocFunc::vrealloc();
+
+ // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
+ // allocated with this `Allocator`.
+ unsafe { realloc.call(ptr, layout, flags) }
+ }
+}
+
#[global_allocator]
static ALLOCATOR: Kmalloc = Kmalloc;
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
index 4785efc474a7..e7bf2982f68f 100644
--- a/rust/kernel/alloc/allocator_test.rs
+++ b/rust/kernel/alloc/allocator_test.rs
@@ -7,6 +7,7 @@
use core::ptr::NonNull;
pub struct Kmalloc;
+pub type Vmalloc = Kmalloc;
unsafe impl Allocator for Kmalloc {
unsafe fn realloc(
--
2.45.2
On Thu, Aug 01, 2024 at 02:02:05AM +0200, Danilo Krummrich wrote:
> Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
> allocator, typically used for larger objects, (much) larger than page
> size.
>
> All memory allocations made with `Vmalloc` end up in `vrealloc()`.
>
> Signed-off-by: Danilo Krummrich <dakr@kernel.org>
> ---
> rust/helpers.c | 8 ++++++++
> rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
> rust/kernel/alloc/allocator_test.rs | 1 +
> 3 files changed, 33 insertions(+)
>
> diff --git a/rust/helpers.c b/rust/helpers.c
> index 92d3c03ae1bd..4c628986f0c9 100644
> --- a/rust/helpers.c
> +++ b/rust/helpers.c
> @@ -33,6 +33,7 @@
> #include <linux/sched/signal.h>
> #include <linux/slab.h>
> #include <linux/spinlock.h>
> +#include <linux/vmalloc.h>
> #include <linux/wait.h>
> #include <linux/workqueue.h>
>
> @@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> }
> EXPORT_SYMBOL_GPL(rust_helper_krealloc);
>
> +void * __must_check __realloc_size(2)
> +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> +{
> + return vrealloc(p, size, flags);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
> +
> /*
> * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
> * use it in contexts where Rust expects a `usize` like slice (array) indices.
> diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> index 397ae5bcc043..e9a3d0694f41 100644
> --- a/rust/kernel/alloc/allocator.rs
> +++ b/rust/kernel/alloc/allocator.rs
> @@ -16,6 +16,12 @@
> /// `bindings::krealloc`.
> pub struct Kmalloc;
>
> +/// The virtually contiguous kernel allocator.
> +///
> +/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
> +/// contiguous kernel virtual space.
> +pub struct Vmalloc;
> +
> /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
> fn aligned_size(new_layout: Layout) -> usize {
> // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
> @@ -58,6 +64,10 @@ fn krealloc() -> Self {
> Self(bindings::krealloc)
> }
>
> + fn vrealloc() -> Self {
> + Self(bindings::vrealloc)
> + }
> +
> // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
> unsafe fn call(
> &self,
> @@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
> }
> }
>
> +unsafe impl Allocator for Vmalloc {
> + unsafe fn realloc(
> + ptr: Option<NonNull<u8>>,
> + layout: Layout,
> + flags: Flags,
> + ) -> Result<NonNull<[u8]>, AllocError> {
> + let realloc = ReallocFunc::vrealloc();
> +
IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
calling __vmalloc_node_noprof() with align=1. In such a case, how would
vmalloc() guarantee the allocated memory is aligned to layout.align()?
[Cc Vlastimil]
Regards,
Boqun
> + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> + // allocated with this `Allocator`.
> + unsafe { realloc.call(ptr, layout, flags) }
> + }
> +}
> +
> #[global_allocator]
> static ALLOCATOR: Kmalloc = Kmalloc;
>
> diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
> index 4785efc474a7..e7bf2982f68f 100644
> --- a/rust/kernel/alloc/allocator_test.rs
> +++ b/rust/kernel/alloc/allocator_test.rs
> @@ -7,6 +7,7 @@
> use core::ptr::NonNull;
>
> pub struct Kmalloc;
> +pub type Vmalloc = Kmalloc;
>
> unsafe impl Allocator for Kmalloc {
> unsafe fn realloc(
> --
> 2.45.2
>
On Sat, Aug 03, 2024 at 11:44:58PM -0700, Boqun Feng wrote:
> On Thu, Aug 01, 2024 at 02:02:05AM +0200, Danilo Krummrich wrote:
> > Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
> > allocator, typically used for larger objects, (much) larger than page
> > size.
> >
> > All memory allocations made with `Vmalloc` end up in `vrealloc()`.
> >
> > Signed-off-by: Danilo Krummrich <dakr@kernel.org>
> > ---
> > rust/helpers.c | 8 ++++++++
> > rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
> > rust/kernel/alloc/allocator_test.rs | 1 +
> > 3 files changed, 33 insertions(+)
> >
> > diff --git a/rust/helpers.c b/rust/helpers.c
> > index 92d3c03ae1bd..4c628986f0c9 100644
> > --- a/rust/helpers.c
> > +++ b/rust/helpers.c
> > @@ -33,6 +33,7 @@
> > #include <linux/sched/signal.h>
> > #include <linux/slab.h>
> > #include <linux/spinlock.h>
> > +#include <linux/vmalloc.h>
> > #include <linux/wait.h>
> > #include <linux/workqueue.h>
> >
> > @@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> > }
> > EXPORT_SYMBOL_GPL(rust_helper_krealloc);
> >
> > +void * __must_check __realloc_size(2)
> > +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> > +{
> > + return vrealloc(p, size, flags);
> > +}
> > +EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
> > +
> > /*
> > * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
> > * use it in contexts where Rust expects a `usize` like slice (array) indices.
> > diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> > index 397ae5bcc043..e9a3d0694f41 100644
> > --- a/rust/kernel/alloc/allocator.rs
> > +++ b/rust/kernel/alloc/allocator.rs
> > @@ -16,6 +16,12 @@
> > /// `bindings::krealloc`.
> > pub struct Kmalloc;
> >
> > +/// The virtually contiguous kernel allocator.
> > +///
> > +/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
> > +/// contiguous kernel virtual space.
> > +pub struct Vmalloc;
> > +
> > /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
> > fn aligned_size(new_layout: Layout) -> usize {
> > // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
> > @@ -58,6 +64,10 @@ fn krealloc() -> Self {
> > Self(bindings::krealloc)
> > }
> >
> > + fn vrealloc() -> Self {
> > + Self(bindings::vrealloc)
> > + }
> > +
> > // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
> > unsafe fn call(
> > &self,
> > @@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
> > }
> > }
> >
> > +unsafe impl Allocator for Vmalloc {
> > + unsafe fn realloc(
> > + ptr: Option<NonNull<u8>>,
> > + layout: Layout,
> > + flags: Flags,
> > + ) -> Result<NonNull<[u8]>, AllocError> {
> > + let realloc = ReallocFunc::vrealloc();
> > +
>
> IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
> calling __vmalloc_node_noprof() with align=1. In such a case, how would
> vmalloc() guarantee the allocated memory is aligned to layout.align()?
True, good catch. I thought of this a while ago and then forgot to fix it.
>
> [Cc Vlastimil]
>
> Regards,
> Boqun
>
> > + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> > + // allocated with this `Allocator`.
> > + unsafe { realloc.call(ptr, layout, flags) }
> > + }
> > +}
> > +
> > #[global_allocator]
> > static ALLOCATOR: Kmalloc = Kmalloc;
> >
> > diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
> > index 4785efc474a7..e7bf2982f68f 100644
> > --- a/rust/kernel/alloc/allocator_test.rs
> > +++ b/rust/kernel/alloc/allocator_test.rs
> > @@ -7,6 +7,7 @@
> > use core::ptr::NonNull;
> >
> > pub struct Kmalloc;
> > +pub type Vmalloc = Kmalloc;
> >
> > unsafe impl Allocator for Kmalloc {
> > unsafe fn realloc(
> > --
> > 2.45.2
> >
>
On Sun, Aug 04, 2024 at 02:41:26PM +0200, Danilo Krummrich wrote:
> On Sat, Aug 03, 2024 at 11:44:58PM -0700, Boqun Feng wrote:
> > On Thu, Aug 01, 2024 at 02:02:05AM +0200, Danilo Krummrich wrote:
> > > Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
> > > allocator, typically used for larger objects, (much) larger than page
> > > size.
> > >
> > > All memory allocations made with `Vmalloc` end up in `vrealloc()`.
> > >
> > > Signed-off-by: Danilo Krummrich <dakr@kernel.org>
> > > ---
> > > rust/helpers.c | 8 ++++++++
> > > rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
> > > rust/kernel/alloc/allocator_test.rs | 1 +
> > > 3 files changed, 33 insertions(+)
> > >
> > > diff --git a/rust/helpers.c b/rust/helpers.c
> > > index 92d3c03ae1bd..4c628986f0c9 100644
> > > --- a/rust/helpers.c
> > > +++ b/rust/helpers.c
> > > @@ -33,6 +33,7 @@
> > > #include <linux/sched/signal.h>
> > > #include <linux/slab.h>
> > > #include <linux/spinlock.h>
> > > +#include <linux/vmalloc.h>
> > > #include <linux/wait.h>
> > > #include <linux/workqueue.h>
> > >
> > > @@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> > > }
> > > EXPORT_SYMBOL_GPL(rust_helper_krealloc);
> > >
> > > +void * __must_check __realloc_size(2)
> > > +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> > > +{
> > > + return vrealloc(p, size, flags);
> > > +}
> > > +EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
> > > +
> > > /*
> > > * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
> > > * use it in contexts where Rust expects a `usize` like slice (array) indices.
> > > diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> > > index 397ae5bcc043..e9a3d0694f41 100644
> > > --- a/rust/kernel/alloc/allocator.rs
> > > +++ b/rust/kernel/alloc/allocator.rs
> > > @@ -16,6 +16,12 @@
> > > /// `bindings::krealloc`.
> > > pub struct Kmalloc;
> > >
> > > +/// The virtually contiguous kernel allocator.
> > > +///
> > > +/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
> > > +/// contiguous kernel virtual space.
> > > +pub struct Vmalloc;
> > > +
> > > /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
> > > fn aligned_size(new_layout: Layout) -> usize {
> > > // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
> > > @@ -58,6 +64,10 @@ fn krealloc() -> Self {
> > > Self(bindings::krealloc)
> > > }
> > >
> > > + fn vrealloc() -> Self {
> > > + Self(bindings::vrealloc)
> > > + }
> > > +
> > > // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
> > > unsafe fn call(
> > > &self,
> > > @@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
> > > }
> > > }
> > >
> > > +unsafe impl Allocator for Vmalloc {
> > > + unsafe fn realloc(
> > > + ptr: Option<NonNull<u8>>,
> > > + layout: Layout,
> > > + flags: Flags,
> > > + ) -> Result<NonNull<[u8]>, AllocError> {
> > > + let realloc = ReallocFunc::vrealloc();
> > > +
> >
> > IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
> > calling __vmalloc_node_noprof() with align=1. In such a case, how would
> > vmalloc() guarantee the allocated memory is aligned to layout.align()?
>
> True, good catch. I thought of this a while ago and then forgot to fix it.
Just for clarification, we're always PAGE_SIZE aligned (guaranteed by
__alloc_vmap_area()), which probably would always be sufficient. That's why I
didn't give it too much attention in the first place and then forgot about it.
However, we indeed do not honor layout.align() if it's larger than PAGE_SIZE.
>
> >
> > [Cc Vlastimil]
> >
> > Regards,
> > Boqun
> >
> > > + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> > > + // allocated with this `Allocator`.
> > > + unsafe { realloc.call(ptr, layout, flags) }
> > > + }
> > > +}
> > > +
> > > #[global_allocator]
> > > static ALLOCATOR: Kmalloc = Kmalloc;
> > >
> > > diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
> > > index 4785efc474a7..e7bf2982f68f 100644
> > > --- a/rust/kernel/alloc/allocator_test.rs
> > > +++ b/rust/kernel/alloc/allocator_test.rs
> > > @@ -7,6 +7,7 @@
> > > use core::ptr::NonNull;
> > >
> > > pub struct Kmalloc;
> > > +pub type Vmalloc = Kmalloc;
> > >
> > > unsafe impl Allocator for Kmalloc {
> > > unsafe fn realloc(
> > > --
> > > 2.45.2
> > >
> >
On Sun, Aug 04, 2024 at 05:16:49PM +0200, Danilo Krummrich wrote:
> On Sun, Aug 04, 2024 at 02:41:26PM +0200, Danilo Krummrich wrote:
> > On Sat, Aug 03, 2024 at 11:44:58PM -0700, Boqun Feng wrote:
> > > On Thu, Aug 01, 2024 at 02:02:05AM +0200, Danilo Krummrich wrote:
> > > > Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
> > > > allocator, typically used for larger objects, (much) larger than page
> > > > size.
> > > >
> > > > All memory allocations made with `Vmalloc` end up in `vrealloc()`.
> > > >
> > > > Signed-off-by: Danilo Krummrich <dakr@kernel.org>
> > > > ---
> > > > rust/helpers.c | 8 ++++++++
> > > > rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
> > > > rust/kernel/alloc/allocator_test.rs | 1 +
> > > > 3 files changed, 33 insertions(+)
> > > >
> > > > diff --git a/rust/helpers.c b/rust/helpers.c
> > > > index 92d3c03ae1bd..4c628986f0c9 100644
> > > > --- a/rust/helpers.c
> > > > +++ b/rust/helpers.c
> > > > @@ -33,6 +33,7 @@
> > > > #include <linux/sched/signal.h>
> > > > #include <linux/slab.h>
> > > > #include <linux/spinlock.h>
> > > > +#include <linux/vmalloc.h>
> > > > #include <linux/wait.h>
> > > > #include <linux/workqueue.h>
> > > >
> > > > @@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> > > > }
> > > > EXPORT_SYMBOL_GPL(rust_helper_krealloc);
> > > >
> > > > +void * __must_check __realloc_size(2)
> > > > +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> > > > +{
> > > > + return vrealloc(p, size, flags);
> > > > +}
> > > > +EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
> > > > +
> > > > /*
> > > > * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
> > > > * use it in contexts where Rust expects a `usize` like slice (array) indices.
> > > > diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> > > > index 397ae5bcc043..e9a3d0694f41 100644
> > > > --- a/rust/kernel/alloc/allocator.rs
> > > > +++ b/rust/kernel/alloc/allocator.rs
> > > > @@ -16,6 +16,12 @@
> > > > /// `bindings::krealloc`.
> > > > pub struct Kmalloc;
> > > >
> > > > +/// The virtually contiguous kernel allocator.
> > > > +///
> > > > +/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
> > > > +/// contiguous kernel virtual space.
> > > > +pub struct Vmalloc;
> > > > +
> > > > /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
> > > > fn aligned_size(new_layout: Layout) -> usize {
> > > > // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
> > > > @@ -58,6 +64,10 @@ fn krealloc() -> Self {
> > > > Self(bindings::krealloc)
> > > > }
> > > >
> > > > + fn vrealloc() -> Self {
> > > > + Self(bindings::vrealloc)
> > > > + }
> > > > +
> > > > // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
> > > > unsafe fn call(
> > > > &self,
> > > > @@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
> > > > }
> > > > }
> > > >
> > > > +unsafe impl Allocator for Vmalloc {
> > > > + unsafe fn realloc(
> > > > + ptr: Option<NonNull<u8>>,
> > > > + layout: Layout,
> > > > + flags: Flags,
> > > > + ) -> Result<NonNull<[u8]>, AllocError> {
> > > > + let realloc = ReallocFunc::vrealloc();
> > > > +
> > >
> > > IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
> > > calling __vmalloc_node_noprof() with align=1. In such a case, how would
> > > vmalloc() guarantee the allocated memory is aligned to layout.align()?
> >
> > True, good catch. I thought of this a while ago and then forgot to fix it.
>
> Just for clarification, we're always PAGE_SIZE aligned (guaranteed by
> __alloc_vmap_area()), which probably would always be sufficient. That's why I
> didn't gave it too much attention in the first place and then forgot about it.
>
> However, we indeed do not honor layout.align() if it's larger than PAGE_SIZE.
Another note on that:
My plan for this series was to just fail allocation for alignment requests
larger than PAGE_SIZE. And, if required, address larger alignments in a later
series, since this one is probably big enough already.
However, for `Vmalloc` we could support it right away, since it's trivial. For
`KVmalloc` though it requires a bit more effort.
For consistency it would probably be better to support alignments larger than
PAGE_SIZE either for both `Vmalloc` and `KVmalloc` or for neither of those though.
My personal tendency goes a bit more into the direction of picking consistency.
Any other opinions?
>
> >
> > >
> > > [Cc Vlastimil]
> > >
> > > Regards,
> > > Boqun
> > >
> > > > + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> > > > + // allocated with this `Allocator`.
> > > > + unsafe { realloc.call(ptr, layout, flags) }
> > > > + }
> > > > +}
> > > > +
> > > > #[global_allocator]
> > > > static ALLOCATOR: Kmalloc = Kmalloc;
> > > >
> > > > diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
> > > > index 4785efc474a7..e7bf2982f68f 100644
> > > > --- a/rust/kernel/alloc/allocator_test.rs
> > > > +++ b/rust/kernel/alloc/allocator_test.rs
> > > > @@ -7,6 +7,7 @@
> > > > use core::ptr::NonNull;
> > > >
> > > > pub struct Kmalloc;
> > > > +pub type Vmalloc = Kmalloc;
> > > >
> > > > unsafe impl Allocator for Kmalloc {
> > > > unsafe fn realloc(
> > > > --
> > > > 2.45.2
> > > >
> > >
On Sun, Aug 04, 2024 at 07:39:52PM +0200, Danilo Krummrich wrote:
[...]
> > > > > +unsafe impl Allocator for Vmalloc {
> > > > > + unsafe fn realloc(
> > > > > + ptr: Option<NonNull<u8>>,
> > > > > + layout: Layout,
> > > > > + flags: Flags,
> > > > > + ) -> Result<NonNull<[u8]>, AllocError> {
> > > > > + let realloc = ReallocFunc::vrealloc();
> > > > > +
> > > >
> > > > IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
> > > > calling __vmalloc_node_noprof() with align=1. In such a case, how would
> > > > vmalloc() guarantee the allocated memory is aligned to layout.align()?
> > >
> > > True, good catch. I thought of this a while ago and then forgot to fix it.
> >
> > Just for clarification, we're always PAGE_SIZE aligned (guaranteed by
> > __alloc_vmap_area()), which probably would always be sufficient. That's why I
> > didn't gave it too much attention in the first place and then forgot about it.
> >
> > However, we indeed do not honor layout.align() if it's larger than PAGE_SIZE.
>
> Another note on that:
>
> My plan for this series was to just fail allocation for alignment requests
> larger than PAGE_SIZE. And, if required, address larger alignments in a later
Yeah, this sounds reasonable.
> series, since this one is probably big enough already.
>
> However, for `Vmalloc` we could support it right away, since it's trivial. For
> `KVmalloc` though it requires a bit more effort.
>
Could you elaborate why it requires a bit more effort? Because
kvrealloc() and kvmalloc() in C don't have a way to specify alignment
requirement? If so, I think a solution to that would be just providing
the K-or-V switch in Rust code, i.e. just `Vmalloc` and `Kmalloc` to
implement `KVmalloc`, which I don't think is a bad idea.
Regards,
Boqun
> For consistancy it would probably be better to support alignments larger than
> PAGE_SIZE either for `Vmalloc` and `KVmalloc` or neither of those though.
>
> My personal tendency goes a bit more into the direction of picking consistancy.
>
> Any other opinions?
>
[...]
On Sun, Aug 04, 2024 at 04:57:30PM -0700, Boqun Feng wrote:
> On Sun, Aug 04, 2024 at 07:39:52PM +0200, Danilo Krummrich wrote:
> [...]
> > > > > > +unsafe impl Allocator for Vmalloc {
> > > > > > + unsafe fn realloc(
> > > > > > + ptr: Option<NonNull<u8>>,
> > > > > > + layout: Layout,
> > > > > > + flags: Flags,
> > > > > > + ) -> Result<NonNull<[u8]>, AllocError> {
> > > > > > + let realloc = ReallocFunc::vrealloc();
> > > > > > +
> > > > >
> > > > > IIUC, vrealloc() calls __vmalloc_noprof() in allocation case, that is
> > > > > calling __vmalloc_node_noprof() with align=1. In such a case, how would
> > > > > vmalloc() guarantee the allocated memory is aligned to layout.align()?
> > > >
> > > > True, good catch. I thought of this a while ago and then forgot to fix it.
> > >
> > > Just for clarification, we're always PAGE_SIZE aligned (guaranteed by
> > > __alloc_vmap_area()), which probably would always be sufficient. That's why I
> > > didn't gave it too much attention in the first place and then forgot about it.
> > >
> > > However, we indeed do not honor layout.align() if it's larger than PAGE_SIZE.
> >
> > Another note on that:
> >
> > My plan for this series was to just fail allocation for alignment requests
> > larger than PAGE_SIZE. And, if required, address larger alignments in a later
>
> Yeah, this sounds reasonable.
>
> > series, since this one is probably big enough already.
> >
> > However, for `Vmalloc` we could support it right away, since it's trivial. For
> > `KVmalloc` though it requires a bit more effort.
> >
>
> Could you elaborate why it requires a bit more effort? Because
> kvrealloc() and kvmalloc() in C don't have a way to specify alignment
> requirement?
Yes, exactly that.
> If so, I think a solution to that would be just providing
> the K-or-V switch in Rust code, i.e. just `Vmalloc` and `Kmalloc` to
> implement `KVmalloc`, which I don't think is a bad idea.
I really think we should do it in C. Look at all the special cases in
__kvmalloc_node_noprof(): fixup page flags, sanity check the size on kmalloc
failure, fail on certain page flags, etc.
I think we really want to keep all this logic in a single place and not
replicate it on the Rust side.
>
> Regards,
> Boqun
>
> > For consistancy it would probably be better to support alignments larger than
> > PAGE_SIZE either for `Vmalloc` and `KVmalloc` or neither of those though.
> >
> > My personal tendency goes a bit more into the direction of picking consistancy.
> >
> > Any other opinions?
> >
> [...]
>
On Thu, Aug 1, 2024 at 2:07 AM Danilo Krummrich <dakr@kernel.org> wrote:
>
> Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
> allocator, typically used for larger objects, (much) larger than page
> size.
>
> All memory allocations made with `Vmalloc` end up in `vrealloc()`.
>
> Signed-off-by: Danilo Krummrich <dakr@kernel.org>
One nit below, but.
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
> rust/helpers.c | 8 ++++++++
> rust/kernel/alloc/allocator.rs | 24 ++++++++++++++++++++++++
> rust/kernel/alloc/allocator_test.rs | 1 +
> 3 files changed, 33 insertions(+)
>
> diff --git a/rust/helpers.c b/rust/helpers.c
> index 92d3c03ae1bd..4c628986f0c9 100644
> --- a/rust/helpers.c
> +++ b/rust/helpers.c
> @@ -33,6 +33,7 @@
> #include <linux/sched/signal.h>
> #include <linux/slab.h>
> #include <linux/spinlock.h>
> +#include <linux/vmalloc.h>
> #include <linux/wait.h>
> #include <linux/workqueue.h>
>
> @@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> }
> EXPORT_SYMBOL_GPL(rust_helper_krealloc);
>
> +void * __must_check __realloc_size(2)
> +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> +{
> + return vrealloc(p, size, flags);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
> +
> /*
> * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
> * use it in contexts where Rust expects a `usize` like slice (array) indices.
> diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> index 397ae5bcc043..e9a3d0694f41 100644
> --- a/rust/kernel/alloc/allocator.rs
> +++ b/rust/kernel/alloc/allocator.rs
> @@ -16,6 +16,12 @@
> /// `bindings::krealloc`.
> pub struct Kmalloc;
>
> +/// The virtually contiguous kernel allocator.
> +///
> +/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
> +/// contiguous kernel virtual space.
> +pub struct Vmalloc;
> +
> /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
> fn aligned_size(new_layout: Layout) -> usize {
> // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
> @@ -58,6 +64,10 @@ fn krealloc() -> Self {
> Self(bindings::krealloc)
> }
>
> + fn vrealloc() -> Self {
> + Self(bindings::vrealloc)
> + }
Technically needs an
// INVARIANT: The function pointer is vrealloc.
Self(bindings::vrealloc)
> +
> // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
> unsafe fn call(
> &self,
> @@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
> }
> }
>
> +unsafe impl Allocator for Vmalloc {
> + unsafe fn realloc(
> + ptr: Option<NonNull<u8>>,
> + layout: Layout,
> + flags: Flags,
> + ) -> Result<NonNull<[u8]>, AllocError> {
> + let realloc = ReallocFunc::vrealloc();
> +
> + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> + // allocated with this `Allocator`.
> + unsafe { realloc.call(ptr, layout, flags) }
> + }
> +}
> +
> #[global_allocator]
> static ALLOCATOR: Kmalloc = Kmalloc;
>
> diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
> index 4785efc474a7..e7bf2982f68f 100644
> --- a/rust/kernel/alloc/allocator_test.rs
> +++ b/rust/kernel/alloc/allocator_test.rs
> @@ -7,6 +7,7 @@
> use core::ptr::NonNull;
>
> pub struct Kmalloc;
> +pub type Vmalloc = Kmalloc;
>
> unsafe impl Allocator for Kmalloc {
> unsafe fn realloc(
> --
> 2.45.2
>
© 2016 - 2026 Red Hat, Inc.