[PATCH v4 4/4] mm/hugetlb: Convert use of struct page to folio in __unmap_hugepage_range()

From: Fan Ni <fan.ni@samsung.com>

In __unmap_hugepage_range(), the "page" pointer always points to the
first page of a huge page, which guarantees there is a folio associated
with it.  Convert the "page" pointer to use a folio.

Signed-off-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
---
 mm/hugetlb.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 443b75e116cf..d53caf96a4b2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5843,11 +5843,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    struct folio *folio, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	const bool folio_provided = !!folio;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	spinlock_t *ptl;
-	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	bool adjust_reservation = false;
@@ -5911,14 +5911,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			continue;
 		}
 
-		page = pte_page(pte);
 		/*
 		 * If a folio is supplied, it is because a specific
 		 * folio is being unmapped, not a range. Ensure the folio we
 		 * are about to unmap is the actual folio of interest.
 		 */
-		if (folio) {
-			if (page_folio(page) != folio) {
+		if (folio_provided) {
+			if (folio != page_folio(pte_page(pte))) {
 				spin_unlock(ptl);
 				continue;
 			}
@@ -5928,12 +5927,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * looking like data was lost
 			 */
 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+		} else {
+			folio = page_folio(pte_page(pte));
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		/* Leave a uffd-wp pte marker if needed */
 		if (huge_pte_uffd_wp(pte) &&
 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
@@ -5941,7 +5942,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 					make_pte_marker(PTE_MARKER_UFFD_WP),
 					sz);
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
-		hugetlb_remove_rmap(page_folio(page));
+		hugetlb_remove_rmap(folio);
 
 		/*
 		 * Restore the reservation for anonymous page, otherwise the
@@ -5950,8 +5951,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * reservation bit.
 		 */
 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
-		    folio_test_anon(page_folio(page))) {
-			folio_set_hugetlb_restore_reserve(page_folio(page));
+		    folio_test_anon(folio)) {
+			folio_set_hugetlb_restore_reserve(folio);
 			/* Reservation to be adjusted after the spin lock */
 			adjust_reservation = true;
 		}
@@ -5975,16 +5976,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				 * count will not be incremented by free_huge_folio.
 				 * Act as if we consumed the reservation.
 				 */
-				folio_clear_hugetlb_restore_reserve(page_folio(page));
+				folio_clear_hugetlb_restore_reserve(folio);
 			else if (rc)
 				vma_add_reservation(h, vma, address);
 		}
 
-		tlb_remove_page_size(tlb, page, huge_page_size(h));
+		tlb_remove_page_size(tlb, folio_page(folio, 0),
+				     folio_size(folio));
 		/*
 		 * If we were instructed to unmap a specific folio, we're done.
 		 */
-		if (folio)
+		if (folio_provided)
 			break;
 	}
 	tlb_end_vma(tlb, vma);
-- 
2.47.2
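
For readers newer to the folio API: the last hunk relies on two identities
that hold for hugetlb mappings, since pte_page() returns the head page of
the huge page.  A minimal sketch, assuming only the standard mm/ helpers
(the function itself is hypothetical and illustrative, not part of the
patch):

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Illustrative only: converting the head page of a hugetlb mapping to a
 * folio and back is lossless, and the folio spans exactly one huge page.
 * This is why tlb_remove_page_size(tlb, page, huge_page_size(h)) can be
 * rewritten as tlb_remove_page_size(tlb, folio_page(folio, 0),
 * folio_size(folio)).
 */
static void folio_identities(pte_t pte, struct hstate *h)
{
	struct page *page = pte_page(pte);	/* head page of the huge page */
	struct folio *folio = page_folio(page);	/* folio containing it */

	VM_WARN_ON(folio_page(folio, 0) != page);	/* lossless round trip */
	VM_WARN_ON(folio_size(folio) != huge_page_size(h));
}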
Re: [PATCH v4 4/4] mm/hugetlb: Convert use of struct page to folio in __unmap_hugepage_range()
Posted by Andrew Morton 7 months, 2 weeks ago
On Mon,  5 May 2025 11:22:44 -0700 nifan.cxl@gmail.com wrote:

> From: Fan Ni <fan.ni@samsung.com>
> 
> In __unmap_hugepage_range(), the "page" pointer always points to the
> first page of a huge page, which guarantees there is a folio associated
> with it.  Convert the "page" pointer to use a folio.
> 
> ...
>
>  		 * Restore the reservation for anonymous page, otherwise the
> @@ -5950,8 +5951,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  		 * reservation bit.
>  		 */
>  		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
> -		    folio_test_anon(page_folio(page))) {
> -			folio_set_hugetlb_restore_reserve(page_folio(page));
> +		    folio_test_anon(folio)) {
> +			folio_set_hugetlb_restore_reserve(folio);
>  			/* Reservation to be adjusted after the spin lock */
>  			adjust_reservation = true;
>  		}
> @@ -5975,16 +5976,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  				 * count will not be incremented by free_huge_folio.
>  				 * Act as if we consumed the reservation.
>  				 */

I did not enjoy reading the above comment, so I did this to it.

The comment would be better if it described why we "Act as if we
consumed the reservation".  Why, not what.


From: Andrew Morton <akpm@linux-foundation.org>
Subject: mm/hugetlb.c: __unmap_hugepage_range(): comment cleanup
Date: Mon May  5 02:54:25 PM PDT 2025

Wrap to 80 cols, fix a typo, use regular layout, parenthesize function
identifiers, fix grammar and add braces.

Cc: David Hildenbrand <david@redhat.com>
Cc: Fan Ni <fan.ni@samsung.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/hugetlb.c |   17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

--- a/mm/hugetlb.c~mm-hugetlbc-__unmap_hugepage_range-comment-cleanup
+++ a/mm/hugetlb.c
@@ -5969,16 +5969,19 @@ void __unmap_hugepage_range(struct mmu_g
 		if (adjust_reservation) {
 			int rc = vma_needs_reservation(h, vma, address);
 
-			if (rc < 0)
-				/* Pressumably allocate_file_region_entries failed
-				 * to allocate a file_region struct. Clear
-				 * hugetlb_restore_reserve so that global reserve
-				 * count will not be incremented by free_huge_folio.
-				 * Act as if we consumed the reservation.
+			if (rc < 0) {
+				/*
+				 * Presumably allocate_file_region_entries()
+				 * failed to allocate a file_region struct.
+				 * Clear hugetlb_restore_reserve so that the
+				 * global reserve count will not be incremented
+				 * by free_huge_folio().  Act as if we consumed
+				 * the reservation.
 				 */
 				folio_clear_hugetlb_restore_reserve(folio);
-			else if (rc)
+			} else if (rc) {
 				vma_add_reservation(h, vma, address);
+			}
 		}
 
 		tlb_remove_page_size(tlb, folio_page(folio, 0),
_
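
Taking up Andrew's "why, not what" remark, one possible further expansion
of the comment.  The rationale below is an editorial reading of how
free_huge_folio() consumes hugetlb_restore_reserve, not something
confirmed in this thread:

			if (rc < 0) {
				/*
				 * Presumably allocate_file_region_entries()
				 * failed, so the reservation map cannot be
				 * updated to re-record a reservation for this
				 * range.  If hugetlb_restore_reserve were
				 * left set, free_huge_folio() would increment
				 * the global reserve count without a matching
				 * entry in the reservation map, leaving the
				 * counters inconsistent.  Act as if we
				 * consumed the reservation: clear the flag
				 * and accept the loss of one reserved page.
				 */
				folio_clear_hugetlb_restore_reserve(folio);
			}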