[RFC PATCH 3/5] mm/hugetlb: set large_rmappable on hugetlb and avoid deferred_list handling
Posted by Zi Yan 1 week, 2 days ago
Commit f708f6970cc9 ("mm/hugetlb: fix kernel NULL pointer dereference when
migrating hugetlb folio") fixed a NULL pointer dereference that occurred when
folio_undo_large_rmappable(), now folio_unqueue_deferred_split(), was used on
hugetlb to clear deferred_list. It did so by clearing the large_rmappable
flag on hugetlb folios. However, hugetlb folios are rmappable, so clearing
large_rmappable is misleading. Instead, set large_rmappable on hugetlb
folios and reject hugetlb in folio_unqueue_deferred_split() to avoid the
issue.

This prepares for separating the compound page and folio code in a
follow-up commit.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/hugetlb.c     | 6 +++---
 mm/hugetlb_cma.c | 2 +-
 mm/internal.h    | 3 ++-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6e855a32de3d..7466c7bf41a1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1422,8 +1422,8 @@ static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask,
 	if (hugetlb_cma_exclusive_alloc())
 		return NULL;
 
-	folio = (struct folio *)alloc_contig_frozen_pages(1 << order, gfp_mask,
-							  nid, nodemask);
+	folio = page_rmappable_folio(alloc_contig_frozen_pages(1 << order, gfp_mask,
+							  nid, nodemask));
 	return folio;
 }
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE || !CONFIG_CONTIG_ALLOC */
@@ -1859,7 +1859,7 @@ static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
 	if (alloc_try_hard)
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 
-	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
+	folio = page_rmappable_folio(__alloc_frozen_pages(gfp_mask, order, nid, nmask));
 
 	/*
 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index f83ae4998990..4245b5dda4dc 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -51,7 +51,7 @@ struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
 	if (!page)
 		return NULL;
 
-	folio = page_folio(page);
+	folio = page_rmappable_folio(page);
 	folio_set_hugetlb_cma(folio);
 	return folio;
 }
diff --git a/mm/internal.h b/mm/internal.h
index d67e8bb75734..8bb22fb9a0e1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -835,7 +835,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 bool __folio_unqueue_deferred_split(struct folio *folio);
 static inline bool folio_unqueue_deferred_split(struct folio *folio)
 {
-	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio) ||
+	    folio_test_hugetlb(folio))
 		return false;
 
 	/*
-- 
2.51.0
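
For reference, page_rmappable_folio() is the mm/internal.h helper the hunks
above switch to. In recent kernels it looks roughly like the sketch below;
its folio_set_large_rmappable() call is what now marks the freshly allocated
hugetlb folios, whereas the plain (struct folio *) casts it replaces left
the flag unset:

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	/* Mark any large folio rmappable as soon as it is created. */
	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}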
Re: [RFC PATCH 3/5] mm/hugetlb: set large_rmappable on hugetlb and avoid deferred_list handling
Posted by Baolin Wang 6 days, 14 hours ago

On 1/30/26 11:48 AM, Zi Yan wrote:
> [...]
> diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
> index f83ae4998990..4245b5dda4dc 100644
> --- a/mm/hugetlb_cma.c
> +++ b/mm/hugetlb_cma.c
> @@ -51,7 +51,7 @@ struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
>   	if (!page)
>   		return NULL;
>   
> -	folio = page_folio(page);
> +	folio = page_rmappable_folio(page);
>   	folio_set_hugetlb_cma(folio);
>   	return folio;
>   }

IIUC, this will break the semantics of is_transparent_hugepage() and
might trigger a split of a hugetlb folio, right?

static inline bool is_transparent_hugepage(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_folio(folio) ||
		folio_test_large_rmappable(folio);
}
Re: [RFC PATCH 3/5] mm/hugetlb: set large_rmappable on hugetlb and avoid deferred_list handling
Posted by Zi Yan 6 days, 1 hour ago
On 1 Feb 2026, at 22:59, Baolin Wang wrote:

> On 1/30/26 11:48 AM, Zi Yan wrote:
>> [...]
>
> IIUC, this will break the semantics of is_transparent_hugepage() and might trigger a split of a hugetlb folio, right?
>
> static inline bool is_transparent_hugepage(const struct folio *folio)
> {
> 	if (!folio_test_large(folio))
> 		return false;
>
> 	return is_huge_zero_folio(folio) ||
> 		folio_test_large_rmappable(folio);
> }

Oh, I missed this. I will check all folio_test_large_rmappable() callers
and filter out hugetlb if necessary.

Thank you for pointing this out.

Best Regards,
Yan, Zi
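
A hypothetical shape for that filtering (a sketch under the assumption that
hugetlb folios now carry large_rmappable; not code posted in this thread)
would be to bail out on hugetlb before the large_rmappable test:

/*
 * Hypothetical sketch only, not from the posted series: exclude hugetlb
 * explicitly so that setting large_rmappable on hugetlb folios does not
 * make them look like THPs to callers of is_transparent_hugepage().
 */
static inline bool is_transparent_hugepage(const struct folio *folio)
{
	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
		return false;

	return is_huge_zero_folio(folio) ||
		folio_test_large_rmappable(folio);
}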