From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
The fast path through a write will require replacing a single node in
the tree. Using a sheaf (32 nodes) is too heavy for the fast path, so
special case the node store operation by just allocating one node in the
maple state.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
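For illustration only, not part of the patch: a minimal caller-side sketch of
the fast path being targeted, using the existing mas_preallocate() /
mas_store_prealloc() pattern. The helper name example_store_one() is made up,
and the comments describe what this change is expected to do rather than
documented behaviour.

/*
 * Hypothetical example: a single-slot overwrite that the write path can
 * treat as a one-node replacement.  With this change the preallocation
 * is expected to stash a single node in mas->alloc via mt_alloc_one()
 * instead of pulling a 32-node sheaf, and mas_pop_node() hands that node
 * back during the store.  Assumes the caller already holds the lock
 * protecting the tree (as mm does with mmap_lock for the VMA tree).
 */
static int example_store_one(struct maple_tree *mt, unsigned long index,
			     void *entry)
{
	MA_STATE(mas, mt, index, index);
	int err;

	err = mas_preallocate(&mas, entry, GFP_KERNEL);
	if (err)
		return err;

	mas_store_prealloc(&mas, entry);	/* frees whatever was not used */
	return 0;
}

If a sheaf is already attached from an earlier, larger preallocation,
mas_pop_node() still falls back to it, so the two allocation sources coexist.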
include/linux/maple_tree.h | 4 +++-
lib/maple_tree.c | 47 ++++++++++++++++++++++++++++++++++++++++------
2 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 3cf1ae9dde7ce43fa20ae400c01fefad048c302e..61eb5e7d09ad0133978e3ac4b2af66710421e769 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -443,6 +443,7 @@ struct ma_state {
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* allocated nodes */
unsigned long node_request;
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
@@ -491,8 +492,9 @@ struct ma_wr_state {
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
- .node_request= 0, \
.sheaf = NULL, \
+ .alloc = NULL, \
+ .node_request= 0, \
.mas_flags = 0, \
.store_type = wr_invalid, \
}
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3c3c14a76d98ded3b619c178d64099b464a2ca23..9aa782b1497f224e7366ebbd65f997523ee0c8ab 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1101,16 +1101,23 @@ static int mas_ascend(struct ma_state *mas)
*
* Return: A pointer to a maple node.
*/
-static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+static __always_inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
struct maple_node *ret;

+ if (mas->alloc) {
+ ret = mas->alloc;
+ mas->alloc = NULL;
+ goto out;
+ }
+
if (WARN_ON_ONCE(!mas->sheaf))
return NULL;

ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf);
- memset(ret, 0, sizeof(*ret));

+out:
+ memset(ret, 0, sizeof(*ret));
return ret;
}

@@ -1121,9 +1128,34 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
*/
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
- if (unlikely(mas->sheaf)) {
- unsigned long refill = mas->node_request;
+ if (!mas->node_request)
+ return;
+
+ if (mas->node_request == 1) {
+ if (mas->sheaf)
+ goto use_sheaf;
+
+ if (mas->alloc)
+ return;

+ mas->alloc = mt_alloc_one(gfp);
+ if (!mas->alloc)
+ goto error;
+
+ mas->node_request = 0;
+ return;
+ }
+
+use_sheaf:
+ if (unlikely(mas->alloc)) {
+ mt_free_one(mas->alloc);
+ mas->alloc = NULL;
+ }
+
+ if (mas->sheaf) {
+ unsigned long refill;
+
+ refill = mas->node_request;
if(kmem_cache_sheaf_size(mas->sheaf) >= refill) {
mas->node_request = 0;
return;
@@ -5386,8 +5418,11 @@ void mas_destroy(struct ma_state *mas)
mas->node_request = 0;
if (mas->sheaf)
mt_return_sheaf(mas->sheaf);
-
mas->sheaf = NULL;
+
+ if (mas->alloc)
+ mt_free_one(mas->alloc);
+ mas->alloc = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);

@@ -6074,7 +6109,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
mas_alloc_nodes(mas, gfp);
}

- if (!mas->sheaf)
+ if (!mas->sheaf && !mas->alloc)
return false;

mas->status = ma_start;

--
2.50.1

* Suren Baghdasaryan <surenb@google.com> [250822 16:25]:

On Wed, Jul 23, 2025 at 6:35 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> [...]
> static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
> {
> - if (unlikely(mas->sheaf)) {
> - unsigned long refill = mas->node_request;
> + if (!mas->node_request)
> + return;
> +
> + if (mas->node_request == 1) {
> + if (mas->sheaf)
> + goto use_sheaf;
> +
> + if (mas->alloc)
> + return;
>
> + mas->alloc = mt_alloc_one(gfp);
> + if (!mas->alloc)
> + goto error;
> +
> + mas->node_request = 0;
> + return;
> + }
> +
> +use_sheaf:
> + if (unlikely(mas->alloc)) {

When would this condition happen? Do we really need to free mas->alloc
here or can it be reused for the next 1-node allocation?

> + mt_free_one(mas->alloc);
> + mas->alloc = NULL;
> + }
* Suren Baghdasaryan <surenb@google.com> [250822 16:25]:
> [...]
> > +use_sheaf:
> > + if (unlikely(mas->alloc)) {
>
> When would this condition happen?
This would be the case if we have one node allocated and requested more
than one node. That is, a chained request for nodes that ends up having
the alloc set and requesting a sheaf.
> Do we really need to free mas->alloc
> here or it can be reused for the next 1-node allocation?
Most calls end in mas_destroy() so that won't happen today.
We could reduce the number of allocations requested to the sheaf and let
the code find the mas->alloc first and use that.
But remember, we are getting into this situation where code did a
mas_preallocate() then figured they needed to do something else (error
recovery, or changed the vma flags and now it can merge..) and will now
need additional nodes. So this is a rare case, so I figured just free
it was the safest thing.
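To make the chained case concrete, here is a hypothetical caller sequence
matching the description above (the helper name and ranges are invented, and
locking is assumed to be handled by the caller as with other mas_* writes):

/*
 * Sketch only: preallocate for what looks like a single-node store, then
 * re-classify the write and preallocate again for a larger range.
 */
static int example_chained_prealloc(struct maple_tree *mt, void *entry,
				    unsigned long index, unsigned long new_last)
{
	MA_STATE(mas, mt, index, index);

	/* 1) Expected single-slot store: one node lands in mas.alloc. */
	if (mas_preallocate(&mas, entry, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * 2) Plans change (error recovery, or the range can now merge), so
	 *    the write needs several nodes.  The second preallocation hits
	 *    mas_alloc_nodes() with mas.alloc still set and a node_request
	 *    larger than one, takes the use_sheaf: path above, frees the
	 *    single node and fills a sheaf instead.
	 */
	mas_set_range(&mas, index, new_last);
	if (mas_preallocate(&mas, entry, GFP_KERNEL))
		return -ENOMEM;

	mas_store_prealloc(&mas, entry);
	return 0;
}

mas_destroy(), called by the store itself here, then frees or returns
whichever of mas.alloc and the sheaf is left unused.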
On Tue, Aug 26, 2025 at 8:11 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> * Suren Baghdasaryan <surenb@google.com> [250822 16:25]:
> > [...]
> > > +use_sheaf:
> > > + if (unlikely(mas->alloc)) {
> >
> > When would this condition happen?
>
>
> This would be the case if we have one node allocated and requested more
> than one node. That is, a chained request for nodes that ends up having
> the alloc set and requesting a sheaf.

Ah, ok. So this is also a recovery case where we thought we needed only
one node and then the situation changed and we need more than one?
>
> > Do we really need to free mas->alloc
> > here or can it be reused for the next 1-node allocation?
>
> Most calls end in mas_destroy() so that won't happen today.
>
> We could reduce the number of allocations requested to the sheaf and let
> the code find the mas->alloc first and use that.
>
> But remember, we are getting into this situation where code did a
> mas_preallocate() then figured it needed to do something else (error
> recovery, or changed the vma flags and now it can merge...) and will now
> need additional nodes. So this is a rare case, and I figured just freeing
> it was the safest thing.

Ok, got it. Both situations would be part of the unusual recovery
case. Makes sense then. Thanks!
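For completeness, the reuse alternative mentioned above (count the kept node
against the request instead of freeing it) would amount to roughly the
following on top of this patch. This is a hypothetical variant for
illustration only; the thread settles on freeing the node since the chained
case is rare:

 use_sheaf:
-	if (unlikely(mas->alloc)) {
-		mt_free_one(mas->alloc);
-		mas->alloc = NULL;
-	}
+	/* Keep the preallocated node; mas_pop_node() hands it out first. */
+	if (unlikely(mas->alloc))
+		mas->node_request--;

Since mas_pop_node() already prefers mas->alloc over the sheaf, nothing else
would need to change for that variant.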