In commit 8218f62c9c9b ("mm: page_frag: use initial zero offset for
page_frag_alloc_align()"), the check for fragsz was moved earlier. As a
result, when the cache is used up and fragsz > PAGE_SIZE, the allocator
no longer tries to refill the cache and simply returns NULL.
I tested it with fragsz:8192 and cache-size:32768. After the initial
four successful allocations, it failed, even though there is plenty of
free memory in the system.
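A rough reproduction sketch of the test (my own harness, not part of this
patch; it assumes a 4K PAGE_SIZE kernel where the declarations live in
<linux/page_frag_cache.h> and relies on the static cache being
zero-initialized):

	#include <linux/gfp.h>
	#include <linux/page_frag_cache.h>
	#include <linux/printk.h>

	static struct page_frag_cache test_nc;

	static void frag_alloc_repro(void)
	{
		int i;

		for (i = 0; i < 8; i++) {
			/* With a 32K cache page, the first four 8K fragments
			 * succeed; the fifth hits the early fragsz > PAGE_SIZE
			 * check and fails without a refill being attempted. */
			void *frag = page_frag_alloc(&test_nc, 8192, GFP_KERNEL);

			if (!frag) {
				pr_info("8K fragment alloc #%d failed\n", i);
				break;
			}
			page_frag_free(frag);
		}
	}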
To fix this, restore the previous refill logic: attempt the refill
before checking fragsz and returning NULL.
Cc: linyunsheng@huawei.com
Cc: stable@vger.kernel.org
Fixes: 8218f62c9c9b ("mm: page_frag: use initial zero offset for page_frag_alloc_align()")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
---
mm/page_frag_cache.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index d2423f30577e..82935d7e53de 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -119,19 +119,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
size = PAGE_SIZE << encoded_page_decode_order(encoded_page);
offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
if (unlikely(offset + fragsz > size)) {
- if (unlikely(fragsz > PAGE_SIZE)) {
- /*
- * The caller is trying to allocate a fragment
- * with fragsz > PAGE_SIZE but the cache isn't big
- * enough to satisfy the request, this may
- * happen in low memory conditions.
- * We don't release the cache page because
- * it could make memory pressure worse
- * so we simply return NULL here.
- */
- return NULL;
- }
-
page = encoded_page_decode_page(encoded_page);

if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
@@ -149,6 +136,19 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
/* reset page count bias and offset to start of new frag */
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = 0;
+
+ if (unlikely(fragsz > size)) {
+ /*
+ * The caller is trying to allocate a fragment
+ * with fragsz > size but the cache isn't big
+ * enough to satisfy the request, this may
+ * happen in low memory conditions.
+ * We don't release the cache page because
+ * it could make memory pressure worse
+ * so we simply return NULL here.
+ */
+ return NULL;
+ }
}

nc->pagecnt_bias--;
--
2.34.1
+cc netdev ML & Alexander
On 3/1/2025 10:03 AM, Haiyang Zhang wrote:
> In commit 8218f62c9c9b ("mm: page_frag: use initial zero offset for
> page_frag_alloc_align()"), the check for fragsz was moved earlier. As a
> result, when the cache is used up and fragsz > PAGE_SIZE, the allocator
> no longer tries to refill the cache and simply returns NULL.
> I tested it with fragsz:8192 and cache-size:32768. After the initial
> four successful allocations, it failed, even though there is plenty of
> free memory in the system.
Hi, Haiyang
It seems PAGE_SIZE is 4K on the tested system?
Which drivers or subsystems are passing a fragsz bigger than PAGE_SIZE
to the page_frag_alloc_align() related API?
> To fix this, restore the previous refill logic: attempt the refill
> before checking fragsz and returning NULL.
The page_frag API is not really meant for allocations bigger than
PAGE_SIZE, as __page_frag_cache_refill() will not try very hard to
allocate an order-3 compound page when calling __alloc_pages() and will
fall back to allocating a base page, as discussed below:
https://lore.kernel.org/all/ead00fb7-8538-45b3-8322-8a41386e7381@huawei.com/
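For reference, the fallback looks roughly like the below (a paraphrased
sketch of __page_frag_cache_refill(), not the verbatim kernel code; the
NUMA node handling and encoded_page bookkeeping are omitted):

	static struct page *refill_sketch(struct page_frag_cache *nc, gfp_t gfp)
	{
		struct page *page = NULL;

	#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* Opportunistic order-3 allocation: no retry, no warning,
		 * no dipping into reserves, so it gives up easily under
		 * memory pressure. */
		gfp_t gfp_mask = gfp | __GFP_COMP | __GFP_NOWARN |
				 __GFP_NORETRY | __GFP_NOMEMALLOC;

		page = alloc_pages(gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER);
	#endif
		if (unlikely(!page))
			/* The fallback is a single base page, so any fragsz
			 * larger than PAGE_SIZE can no longer be satisfied
			 * from this cache. */
			page = alloc_pages(gfp, 0);

		return page;
	}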
>
> Cc: linyunsheng@huawei.com
> Cc: stable@vger.kernel.org
> Fixes: 8218f62c9c9b ("mm: page_frag: use initial zero offset for page_frag_alloc_align()")
> Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
> ---
> mm/page_frag_cache.c | 26 +++++++++++++-------------
> 1 file changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index d2423f30577e..82935d7e53de 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -119,19 +119,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> size = PAGE_SIZE << encoded_page_decode_order(encoded_page);
> offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
> if (unlikely(offset + fragsz > size)) {
> - if (unlikely(fragsz > PAGE_SIZE)) {
> - /*
> - * The caller is trying to allocate a fragment
> - * with fragsz > PAGE_SIZE but the cache isn't big
> - * enough to satisfy the request, this may
> - * happen in low memory conditions.
> - * We don't release the cache page because
> - * it could make memory pressure worse
> - * so we simply return NULL here.
> - */
> - return NULL;
> - }
> -
> page = encoded_page_decode_page(encoded_page);
>
> if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> @@ -149,6 +136,19 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> /* reset page count bias and offset to start of new frag */
> nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> offset = 0;
> +
> + if (unlikely(fragsz > size)) {
> + /*
> + * The caller is trying to allocate a fragment
> + * with fragsz > size but the cache isn't big
> + * enough to satisfy the request, this may
> + * happen in low memory conditions.
> + * We don't release the cache page because
> + * it could make memory pressure worse
> + * so we simply return NULL here.
> + */
> + return NULL;
> + }
> }
>
> nc->pagecnt_bias--;