During __filemap_add_folio(), a shadow entry covering n slots may be
present when a folio covering m slots, with m < n, is to be added.
Instead of splitting all n slots, only the m slots covered by the folio
need to be split; the remaining n-m slots can be retained by shadow
entries with orders ranging from m to n-1. This method only requires
(n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT)
new xa_nodes instead of
(n % XA_CHUNK_SHIFT) * ((n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT))
new xa_nodes, compared to the original xas_split_alloc() + xas_split()
one. For example, to insert an order-0 folio when an order-9 shadow entry
is present (assuming XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of
8.
xas_try_split_min_order() is introduced to reduce the number of calls to
xas_try_split() during split.
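As an illustration of the new retry loop below (the numbers in the
comment assume XA_CHUNK_SHIFT is 6, an order-9 shadow entry and an
order-0 folio, i.e. forder == 0):

	unsigned int split_order = max(forder, xas_try_split_min_order(order));

	while (order > forder) {
		xas_set_order(&xas, index, split_order);
		xas_try_split(&xas, old, order);	/* 9->6, then 6->5, then 5->0 */
		if (xas_error(&xas))
			goto unlock;
		order = split_order;
		split_order = max(xas_try_split_min_order(split_order), forder);
	}

xas_try_split_min_order(9) == 6, xas_try_split_min_order(6) == 5 and
xas_try_split_min_order(5) == 0, so the shadow entry is split with three
xas_try_split() calls and a single new xa_node (for the 6->5 step),
instead of nine order-by-one steps.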
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yu Zhao <yuzhao@google.com>
---
include/linux/xarray.h | 7 +++++++
lib/xarray.c | 25 +++++++++++++++++++++++
mm/filemap.c | 45 +++++++++++++++++-------------------------
3 files changed, 50 insertions(+), 27 deletions(-)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 4010195201c9..78eede109b1a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1556,6 +1556,7 @@ int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
+unsigned int xas_try_split_min_order(unsigned int order);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
@@ -1582,6 +1583,12 @@ static inline void xas_try_split(struct xa_state *xas, void *entry,
unsigned int order)
{
}
+
+static inline unsigned int xas_try_split_min_order(unsigned int order)
+{
+ return 0;
+}
+
#endif
/**
diff --git a/lib/xarray.c b/lib/xarray.c
index bc197c96d171..8067182d3e43 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1133,6 +1133,28 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
}
EXPORT_SYMBOL_GPL(xas_split);
+/**
+ * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
+ * @order: Current entry order.
+ *
+ * xas_try_split() can split a multi-index entry to smaller than @order - 1 if
+ * no new xa_node is needed. This function provides the minimal order
+ * xas_try_split() supports.
+ *
+ * Return: the minimal order xas_try_split() supports
+ *
+ * Context: Any context.
+ *
+ */
+unsigned int xas_try_split_min_order(unsigned int order)
+{
+ if (order % XA_CHUNK_SHIFT == 0)
+ return order == 0 ? 0 : order - 1;
+
+ return order - (order % XA_CHUNK_SHIFT);
+}
+EXPORT_SYMBOL_GPL(xas_try_split_min_order);
+
/**
* xas_try_split() - Try to split a multi-index entry.
* @xas: XArray operation state.
@@ -1144,6 +1166,9 @@ EXPORT_SYMBOL_GPL(xas_split);
* needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
* NULL. If more new xa_node are needed, the function gives EINVAL error.
*
+ * NOTE: use xas_try_split_min_order() to get next split order instead of
+ * @order - 1 if you want to minimize xas_try_split() calls.
+ *
* Context: Any context. The caller should hold the xa_lock.
*/
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
diff --git a/mm/filemap.c b/mm/filemap.c
index 2b860b59a521..cfb49ed659a1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -857,11 +857,10 @@ EXPORT_SYMBOL_GPL(replace_page_cache_folio);
noinline int __filemap_add_folio(struct address_space *mapping,
struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
- XA_STATE(xas, &mapping->i_pages, index);
- void *alloced_shadow = NULL;
- int alloced_order = 0;
+ XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
bool huge;
long nr;
+ unsigned int forder = folio_order(folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
@@ -870,7 +869,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
mapping_set_update(&xas, mapping);
VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
- xas_set_order(&xas, index, folio_order(folio));
huge = folio_test_hugetlb(folio);
nr = folio_nr_pages(folio);
@@ -880,7 +878,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
folio->index = xas.xa_index;
for (;;) {
- int order = -1, split_order = 0;
+ int order = -1;
void *entry, *old = NULL;
xas_lock_irq(&xas);
@@ -898,21 +896,25 @@ noinline int __filemap_add_folio(struct address_space *mapping,
order = xas_get_order(&xas);
}
- /* entry may have changed before we re-acquire the lock */
- if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
- xas_destroy(&xas);
- alloced_order = 0;
- }
-
if (old) {
- if (order > 0 && order > folio_order(folio)) {
+ if (order > 0 && order > forder) {
+ unsigned int split_order = max(forder,
+ xas_try_split_min_order(order));
+
/* How to handle large swap entries? */
BUG_ON(shmem_mapping(mapping));
- if (!alloced_order) {
- split_order = order;
- goto unlock;
+
+ while (order > forder) {
+ xas_set_order(&xas, index, split_order);
+ xas_try_split(&xas, old, order);
+ if (xas_error(&xas))
+ goto unlock;
+ order = split_order;
+ split_order =
+ max(xas_try_split_min_order(
+ split_order),
+ forder);
}
- xas_split(&xas, old, order);
xas_reset(&xas);
}
if (shadowp)
@@ -936,17 +938,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
unlock:
xas_unlock_irq(&xas);
- /* split needed, alloc here and retry. */
- if (split_order) {
- xas_split_alloc(&xas, old, split_order, gfp);
- if (xas_error(&xas))
- goto error;
- alloced_shadow = old;
- alloced_order = split_order;
- xas_reset(&xas);
- continue;
- }
-
if (!xas_nomem(&xas, gfp))
break;
}
--
2.47.2
Hello,
On Wed, 26 Feb 2025 16:08:53 -0500 Zi Yan <ziy@nvidia.com> wrote:
> During __filemap_add_folio(), a shadow entry covering n slots may be
> present when a folio covering m slots, with m < n, is to be added.
> Instead of splitting all n slots, only the m slots covered by the folio
> need to be split; the remaining n-m slots can be retained by shadow
> entries with orders ranging from m to n-1. This method only requires
>
> (n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT)
>
> new xa_nodes instead of
> (n % XA_CHUNK_SHIFT) * ((n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT))
>
> new xa_nodes, compared to the original xas_split_alloc() + xas_split()
> one. For example, to insert an order-0 folio when an order-9 shadow entry
> is present (assuming XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of
> 8.
>
> xas_try_split_min_order() is introduced to reduce the number of calls to
> xas_try_split() during split.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Kairui Song <kasong@tencent.com>
> Cc: Miaohe Lin <linmiaohe@huawei.com>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: John Hubbard <jhubbard@nvidia.com>
> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Cc: Ryan Roberts <ryan.roberts@arm.com>
> Cc: Yang Shi <yang@os.amperecomputing.com>
> Cc: Yu Zhao <yuzhao@google.com>
> ---
> include/linux/xarray.h | 7 +++++++
> lib/xarray.c | 25 +++++++++++++++++++++++
> mm/filemap.c | 45 +++++++++++++++++-------------------------
> 3 files changed, 50 insertions(+), 27 deletions(-)
>
> diff --git a/include/linux/xarray.h b/include/linux/xarray.h
> index 4010195201c9..78eede109b1a 100644
> --- a/include/linux/xarray.h
> +++ b/include/linux/xarray.h
> @@ -1556,6 +1556,7 @@ int xas_get_order(struct xa_state *xas);
> void xas_split(struct xa_state *, void *entry, unsigned int order);
> void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
> void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
> +unsigned int xas_try_split_min_order(unsigned int order);
> #else
> static inline int xa_get_order(struct xarray *xa, unsigned long index)
> {
> @@ -1582,6 +1583,12 @@ static inline void xas_try_split(struct xa_state *xas, void *entry,
> unsigned int order)
> {
> }
> +
> +static inline unsigned int xas_try_split_min_order(unsigned int order)
> +{
> + return 0;
> +}
> +
> #endif
>
> /**
> diff --git a/lib/xarray.c b/lib/xarray.c
> index bc197c96d171..8067182d3e43 100644
> --- a/lib/xarray.c
> +++ b/lib/xarray.c
> @@ -1133,6 +1133,28 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
> }
> EXPORT_SYMBOL_GPL(xas_split);
>
> +/**
> + * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
> + * @order: Current entry order.
> + *
> + * xas_try_split() can split a multi-index entry to smaller than @order - 1 if
> + * no new xa_node is needed. This function provides the minimal order
> + * xas_try_split() supports.
> + *
> + * Return: the minimal order xas_try_split() supports
> + *
> + * Context: Any context.
> + *
> + */
> +unsigned int xas_try_split_min_order(unsigned int order)
> +{
> + if (order % XA_CHUNK_SHIFT == 0)
> + return order == 0 ? 0 : order - 1;
> +
> + return order - (order % XA_CHUNK_SHIFT);
> +}
> +EXPORT_SYMBOL_GPL(xas_try_split_min_order);
> +
I found this makes the build fail when CONFIG_XARRAY_MULTI is unset, like below.
/linux/lib/xarray.c:1251:14: error: redefinition of ‘xas_try_split_min_order’
1251 | unsigned int xas_try_split_min_order(unsigned int order)
| ^~~~~~~~~~~~~~~~~~~~~~~
In file included from /linux/lib/xarray.c:13:
/linux/include/linux/xarray.h:1587:28: note: previous definition of ‘xas_try_split_min_order’ with type ‘unsigned int(unsigned int)’
1587 | static inline unsigned int xas_try_split_min_order(unsigned int order)
| ^~~~~~~~~~~~~~~~~~~~~~~
I think we should have the definition only when CONFIG_XARRAY_MULTI?
Thanks,
SJ
[...]
On 8 Mar 2025, at 13:14, SeongJae Park wrote:
> Hello,
>
> On Wed, 26 Feb 2025 16:08:53 -0500 Zi Yan <ziy@nvidia.com> wrote:
>
>> [...]
>
> I found this makes the build fail when CONFIG_XARRAY_MULTI is unset, like below.
>
> /linux/lib/xarray.c:1251:14: error: redefinition of ‘xas_try_split_min_order’
> 1251 | unsigned int xas_try_split_min_order(unsigned int order)
> | ^~~~~~~~~~~~~~~~~~~~~~~
> In file included from /linux/lib/xarray.c:13:
> /linux/include/linux/xarray.h:1587:28: note: previous definition of ‘xas_try_split_min_order’ with type ‘unsigned int(unsigned int)’
> 1587 | static inline unsigned int xas_try_split_min_order(unsigned int order)
> | ^~~~~~~~~~~~~~~~~~~~~~~
>
> I think we should have the definition only when CONFIG_XARRAY_MULTI?
I think it might be a merge issue, since my original patch[1] places
xas_try_split_min_order() above xas_try_split(), both of which are inside
the #ifdef CONFIG_XARRAY_MULTI / #endif block. But mm-everything-2025-03-08-00-43
seems to move xas_try_split_min_order() below xas_try_split() and out of
the CONFIG_XARRAY_MULTI guard.
[1] https://lore.kernel.org/linux-mm/20250226210854.2045816-2-ziy@nvidia.com/
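For reference, the intended arrangement in [1] is roughly (a sketch, not
the exact hunks):

	/* include/linux/xarray.h */
	#ifdef CONFIG_XARRAY_MULTI
	...
	void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
	unsigned int xas_try_split_min_order(unsigned int order);
	#else
	...
	static inline unsigned int xas_try_split_min_order(unsigned int order)
	{
		return 0;
	}
	#endif

	/* lib/xarray.c */
	#ifdef CONFIG_XARRAY_MULTI
	...
	unsigned int xas_try_split_min_order(unsigned int order)
	{
		if (order % XA_CHUNK_SHIFT == 0)
			return order == 0 ? 0 : order - 1;

		return order - (order % XA_CHUNK_SHIFT);
	}
	EXPORT_SYMBOL_GPL(xas_try_split_min_order);

	void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
	...
	#endif /* CONFIG_XARRAY_MULTI */

so the extern declaration and the exported definition exist only when
CONFIG_XARRAY_MULTI is set, and the static inline stub in the header is
the only definition otherwise.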
--
Best Regards,
Yan, Zi
On Sat, 08 Mar 2025 13:32:02 -0500 Zi Yan <ziy@nvidia.com> wrote:
> On 8 Mar 2025, at 13:14, SeongJae Park wrote:
>
> > Hello,
> >
> > On Wed, 26 Feb 2025 16:08:53 -0500 Zi Yan <ziy@nvidia.com> wrote:
[...]
> >> diff --git a/include/linux/xarray.h b/include/linux/xarray.h
> >> index 4010195201c9..78eede109b1a 100644
> >> --- a/include/linux/xarray.h
> >> +++ b/include/linux/xarray.h
> >> @@ -1556,6 +1556,7 @@ int xas_get_order(struct xa_state *xas);
> >> void xas_split(struct xa_state *, void *entry, unsigned int order);
> >> void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
> >> void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
> >> +unsigned int xas_try_split_min_order(unsigned int order);
> >> #else
> >> static inline int xa_get_order(struct xarray *xa, unsigned long index)
> >> {
> >> @@ -1582,6 +1583,12 @@ static inline void xas_try_split(struct xa_state *xas, void *entry,
> >> unsigned int order)
> >> {
> >> }
> >> +
> >> +static inline unsigned int xas_try_split_min_order(unsigned int order)
> >> +{
> >> + return 0;
> >> +}
> >> +
> >> #endif
> >>
> >> /**
> >> diff --git a/lib/xarray.c b/lib/xarray.c
> >> index bc197c96d171..8067182d3e43 100644
> >> --- a/lib/xarray.c
> >> +++ b/lib/xarray.c
> >> @@ -1133,6 +1133,28 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
> >> }
> >> EXPORT_SYMBOL_GPL(xas_split);
> >>
> >> +/**
> >> + * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
> >> + * @order: Current entry order.
> >> + *
> >> + * xas_try_split() can split a multi-index entry to smaller than @order - 1 if
> >> + * no new xa_node is needed. This function provides the minimal order
> >> + * xas_try_split() supports.
> >> + *
> >> + * Return: the minimal order xas_try_split() supports
> >> + *
> >> + * Context: Any context.
> >> + *
> >> + */
> >> +unsigned int xas_try_split_min_order(unsigned int order)
> >> +{
> >> + if (order % XA_CHUNK_SHIFT == 0)
> >> + return order == 0 ? 0 : order - 1;
> >> +
> >> + return order - (order % XA_CHUNK_SHIFT);
> >> +}
> >> +EXPORT_SYMBOL_GPL(xas_try_split_min_order);
> >> +
> >
> > I found this makes the build fail when CONFIG_XARRAY_MULTI is unset, like below.
> >
> > /linux/lib/xarray.c:1251:14: error: redefinition of ‘xas_try_split_min_order’
> > 1251 | unsigned int xas_try_split_min_order(unsigned int order)
> > | ^~~~~~~~~~~~~~~~~~~~~~~
> > In file included from /linux/lib/xarray.c:13:
> > /linux/include/linux/xarray.h:1587:28: note: previous definition of ‘xas_try_split_min_order’ with type ‘unsigned int(unsigned int)’
> > 1587 | static inline unsigned int xas_try_split_min_order(unsigned int order)
> > | ^~~~~~~~~~~~~~~~~~~~~~~
> >
> > I think we should have the definition only when CONFIG_XARRAY_MULTI?
>
> I think it might be a merge issue, since my original patch[1] places
> xas_try_split_min_order() above xas_try_split(), both of which are inside
> the #ifdef CONFIG_XARRAY_MULTI / #endif block. But mm-everything-2025-03-08-00-43
> seems to move xas_try_split_min_order() below xas_try_split() and out of
> the CONFIG_XARRAY_MULTI guard.
You're right. I was testing this on the mm-unstable tree, more specifically,
commit 2f0c87542d97.
I confirmed the build failure goes away after moving the definition to the
original place.
>
> [1] https://lore.kernel.org/linux-mm/20250226210854.2045816-2-ziy@nvidia.com/
>
> --
> Best Regards,
> Yan, Zi
Thanks,
SJ
On 8 Mar 2025, at 13:32, Zi Yan wrote:
> On 8 Mar 2025, at 13:14, SeongJae Park wrote:
>
>> Hello,
>>
>> On Wed, 26 Feb 2025 16:08:53 -0500 Zi Yan <ziy@nvidia.com> wrote:
>>
>>> [...]
>>
>> I found this makes the build fail when CONFIG_XARRAY_MULTI is unset, like below.
>>
>> /linux/lib/xarray.c:1251:14: error: redefinition of ‘xas_try_split_min_order’
>> 1251 | unsigned int xas_try_split_min_order(unsigned int order)
>> | ^~~~~~~~~~~~~~~~~~~~~~~
>> In file included from /linux/lib/xarray.c:13:
>> /linux/include/linux/xarray.h:1587:28: note: previous definition of ‘xas_try_split_min_order’ with type ‘unsigned int(unsigned int)’
>> 1587 | static inline unsigned int xas_try_split_min_order(unsigned int order)
>> | ^~~~~~~~~~~~~~~~~~~~~~~
>>
>> I think we should have the definition only when CONFIG_XARRAY_MULTI?
>
> I think it might be a merge issue, since my original patch[1] places
> xas_try_split_min_order() above xas_try_split(), both of which are inside
> the #ifdef CONFIG_XARRAY_MULTI / #endif block. But mm-everything-2025-03-08-00-43
> seems to move xas_try_split_min_order() below xas_try_split() and out of
> the CONFIG_XARRAY_MULTI guard.
>
> [1] https://lore.kernel.org/linux-mm/20250226210854.2045816-2-ziy@nvidia.com/
In addition, the new comment intended for xas_try_split() has been added to the xas_split() comment instead.
See https://web.git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git/tree/lib/xarray.c?h=mm-everything-2025-03-08-00-43#n1084
Something went wrong when this patch was applied.
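That is, the NOTE added by this patch is meant to end up in the kernel-doc
of xas_try_split(), roughly:

	/**
	 * xas_try_split() - Try to split a multi-index entry.
	 * ...
	 * NOTE: use xas_try_split_min_order() to get next split order instead of
	 * @order - 1 if you want to minimize xas_try_split() calls.
	 *
	 * Context: Any context. The caller should hold the xa_lock.
	 */
	void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)

not in the comment above xas_split().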
--
Best Regards,
Yan, Zi