From: Fan Ni <fan.ni@samsung.com>
In __unmap_hugepage_range(), the "page" pointer always points to the
first page of a huge page, which guarantees there is a folio associated
with it. Convert the "page" pointer to use a folio.
Signed-off-by: Fan Ni <fan.ni@samsung.com>
---
mm/hugetlb.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6696206d556e..293c2afa724b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5815,12 +5815,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
pte_t *ptep;
pte_t pte;
spinlock_t *ptl;
- struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
bool adjust_reservation = false;
unsigned long last_addr_mask;
bool force_flush = false;
+ const bool folio_provided = !!folio;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -5879,14 +5879,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
continue;
}
- page = pte_page(pte);
/*
* If a reference page is supplied, it is because a specific
* page is being unmapped, not a range. Ensure the page we
* are about to unmap is the actual page of interest.
*/
- if (folio) {
- if (page_folio(page) != folio) {
+ if (folio_provided) {
+ if (folio != page_folio(pte_page(pte))) {
spin_unlock(ptl);
continue;
}
@@ -5896,12 +5895,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* looking like data was lost
*/
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+ } else {
+ folio = page_folio(pte_page(pte));
}
pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
- set_page_dirty(page);
+ folio_mark_dirty(folio);
/* Leave a uffd-wp pte marker if needed */
if (huge_pte_uffd_wp(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
@@ -5909,7 +5910,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
make_pte_marker(PTE_MARKER_UFFD_WP),
sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- hugetlb_remove_rmap(page_folio(page));
+ hugetlb_remove_rmap(folio);
/*
* Restore the reservation for anonymous page, otherwise the
@@ -5918,8 +5919,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* reservation bit.
*/
if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
- folio_test_anon(page_folio(page))) {
- folio_set_hugetlb_restore_reserve(page_folio(page));
+ folio_test_anon(folio)) {
+ folio_set_hugetlb_restore_reserve(folio);
/* Reservation to be adjusted after the spin lock */
adjust_reservation = true;
}
@@ -5943,16 +5944,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* count will not be incremented by free_huge_folio.
* Act as if we consumed the reservation.
*/
- folio_clear_hugetlb_restore_reserve(page_folio(page));
+ folio_clear_hugetlb_restore_reserve(folio);
else if (rc)
vma_add_reservation(h, vma, address);
}
- tlb_remove_page_size(tlb, page, huge_page_size(h));
+ tlb_remove_page_size(tlb, folio_page(folio, 0),
+ folio_size(folio));
/*
* Bail out after unmapping reference page if supplied
*/
- if (folio)
+ if (folio_provided)
break;
}
tlb_end_vma(tlb, vma);
--
2.47.2
On Mon, Apr 28, 2025 at 10:11:47AM -0700, nifan.cxl@gmail.com wrote:
> From: Fan Ni <fan.ni@samsung.com>
>
> In __unmap_hugepage_range(), the "page" pointer always points to the
> first page of a huge page, which guarantees there is a folio associated
> with it. Convert the "page" pointer to use a folio.
>
> Signed-off-by: Fan Ni <fan.ni@samsung.com>
> ---
> mm/hugetlb.c | 24 +++++++++++++-----------
> 1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 6696206d556e..293c2afa724b 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5815,12 +5815,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
> pte_t *ptep;
> pte_t pte;
> spinlock_t *ptl;
> - struct page *page;
> struct hstate *h = hstate_vma(vma);
> unsigned long sz = huge_page_size(h);
> bool adjust_reservation = false;
> unsigned long last_addr_mask;
> bool force_flush = false;
> + const bool folio_provided = !!folio;
This might be me just nitpicking, but:
why not rename "folio" to "ref_folio" or "provided_folio" and do

	struct folio *folio = ref_folio;

I think that is more natural than the boolean.
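Untested sketch of what I mean (parameter name and the elided lines are
just illustrative):

	static void __unmap_hugepage_range(..., struct folio *ref_folio, ...)
	{
		/* Non-NULL only if the caller wants one specific folio unmapped. */
		struct folio *folio = ref_folio;
		...
		if (ref_folio) {
			/* Skip ptes that do not map the folio of interest. */
			if (folio != page_folio(pte_page(pte))) {
				spin_unlock(ptl);
				continue;
			}
			...
		} else {
			folio = page_folio(pte_page(pte));
		}
		...
		/* If we were asked to unmap a specific folio, we are done. */
		if (ref_folio)
			break;
		...
	}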
Also, you need to update the comment that still references the
"reference page", plus the one David mentioned.
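For the former, something like this maybe (wording is just a suggestion):

	/*
	 * If a reference folio is supplied, it is because a specific
	 * folio is being unmapped, not a range. Ensure the folio we
	 * are about to unmap is the actual folio of interest.
	 */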
Anyway,
Reviewed-by: Oscar Salvador <osalvador@suse.de>
--
Oscar Salvador
SUSE Labs
On 28.04.25 19:11, nifan.cxl@gmail.com wrote:
> From: Fan Ni <fan.ni@samsung.com>
>
> In __unmap_hugepage_range(), the "page" pointer always points to the
> first page of a huge page, which guarantees there is a folio associated
> with it. Convert the "page" pointer to use a folio.
>
> Signed-off-by: Fan Ni <fan.ni@samsung.com>
> ---
>  mm/hugetlb.c | 24 +++++++++++++-----------
>  1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 6696206d556e..293c2afa724b 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5815,12 +5815,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  	pte_t *ptep;
>  	pte_t pte;
>  	spinlock_t *ptl;
> -	struct page *page;
>  	struct hstate *h = hstate_vma(vma);
>  	unsigned long sz = huge_page_size(h);
>  	bool adjust_reservation = false;
>  	unsigned long last_addr_mask;
>  	bool force_flush = false;
> +	const bool folio_provided = !!folio;

I would put that all the way to the top.

>  	}
> @@ -5943,16 +5944,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  	 * count will not be incremented by free_huge_folio.
>  	 * Act as if we consumed the reservation.
>  	 */
> -			folio_clear_hugetlb_restore_reserve(page_folio(page));
> +			folio_clear_hugetlb_restore_reserve(folio);
>  		else if (rc)
>  			vma_add_reservation(h, vma, address);
>  	}
>
> -	tlb_remove_page_size(tlb, page, huge_page_size(h));
> +	tlb_remove_page_size(tlb, folio_page(folio, 0),
> +			     folio_size(folio));
>  	/*
>  	 * Bail out after unmapping reference page if supplied
>  	 */

I wonder if we want to adjust that comment while at it.

/* If we were instructed to unmap a specific folio, we're done. */

Acked-by: David Hildenbrand <david@redhat.com>

--
Cheers,

David / dhildenb