From: Fan Ni <fan.ni@samsung.com>

The function unmap_hugepage_range() has two kinds of users:
1) unmap_ref_private(), which passes in the head page of a folio. Since
   unmap_ref_private() already takes a folio and has no other use for
   the derived page, it is natural for unmap_hugepage_range() to take
   a folio as well.
2) All other users, which pass in a NULL pointer.

In both cases, we can pass in a folio. Refactor unmap_hugepage_range()
to take a folio.

Signed-off-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
---
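Note on the NULL case (illustration only, not part of the change): the
callers that used to pass a NULL page now pass a NULL folio, and the
forwarded "&folio->page" stays NULL because no dereference happens --
"page" sits at offset zero in struct folio, so taking its address is
plain pointer arithmetic. A minimal userspace sketch of the invariant
this relies on (simplified struct layout; strictly speaking ISO C
leaves this undefined, but at offset zero it is the established idiom):

	#include <assert.h>
	#include <stddef.h>

	struct page { unsigned long flags; };	/* simplified */
	struct folio { struct page page; };	/* first member */

	int main(void)
	{
		struct folio *folio = NULL;

		/* The idiom requires 'page' to stay at offset 0. */
		static_assert(offsetof(struct folio, page) == 0,
			      "page must be the first member of folio");

		/* &folio->page is computed, never dereferenced. */
		assert((void *)&folio->page == (void *)folio);
		return 0;
	}
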
 include/linux/hugetlb.h | 4 ++--
 mm/hugetlb.c            | 7 ++++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 23ebf49c5d6a..f6d5f24e793c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -129,8 +129,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
 void unmap_hugepage_range(struct vm_area_struct *,
-                          unsigned long, unsigned long, struct page *,
-                          zap_flags_t);
+                          unsigned long start, unsigned long end,
+                          struct folio *, zap_flags_t);
 void __unmap_hugepage_range(struct mmu_gather *tlb,
                            struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0c2b264a7ab8..c339ffe05556 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6046,7 +6046,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-                          unsigned long end, struct page *ref_page,
+                          unsigned long end, struct folio *folio,
                           zap_flags_t zap_flags)
 {
        struct mmu_notifier_range range;
@@ -6058,7 +6058,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        mmu_notifier_invalidate_range_start(&range);
        tlb_gather_mmu(&tlb, vma->vm_mm);
 
-       __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+       __unmap_hugepage_range(&tlb, vma, start, end,
+                              &folio->page, zap_flags);
 
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
@@ -6116,7 +6117,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma, address,
                                             address + huge_page_size(h),
-                                            &folio->page, 0);
+                                            folio, 0);
        }
        i_mmap_unlock_write(mapping);
 }
--
2.47.2
On Mon, May 05, 2025 at 11:22:42AM -0700, nifan.cxl@gmail.com wrote:
> From: Fan Ni <fan.ni@samsung.com>
>
> The function unmap_hugepage_range() has two kinds of users:
> 1) unmap_ref_private(), which passes in the head page of a folio. Since
>    unmap_ref_private() already takes a folio and has no other use for
>    the derived page, it is natural for unmap_hugepage_range() to take
>    a folio as well.
> 2) All other users, which pass in a NULL pointer.
>
> In both cases, we can pass in a folio. Refactor unmap_hugepage_range()
> to take a folio.

It looks like unmap_ref_private() is the only caller that cares about
passing a particular folio to unmap_hugepage_range(). Is there any
reason we shouldn't drop the folio argument and call
__unmap_hugepage_range() directly?
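
For illustration, a rough sketch of what that alternative could look
like inside unmap_ref_private()'s loop -- hypothetical and untested,
with the notifier/tlb setup mirrored from the unmap_hugepage_range()
body in this patch (the MMU_NOTIFY_CLEAR event and the helper calls
are assumptions based on that body):

	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* Do the setup unmap_hugepage_range() used to do for us. */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0,
				iter_vma->vm_mm, address,
				address + huge_page_size(h));
	adjust_range_if_pmd_sharing_possible(iter_vma, &range.start,
					     &range.end);
	mmu_notifier_invalidate_range_start(&range);
	tlb_gather_mmu(&tlb, iter_vma->vm_mm);

	/* Call the worker directly with this folio's head page. */
	__unmap_hugepage_range(&tlb, iter_vma, address,
			       address + huge_page_size(h),
			       &folio->page, 0);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

unmap_hugepage_range() could then lose the folio argument entirely.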
On Mon, 5 May 2025 12:59:20 -0700 "Vishal Moola (Oracle)" <vishal.moola@gmail.com> wrote:

> On Mon, May 05, 2025 at 11:22:42AM -0700, nifan.cxl@gmail.com wrote:
> > From: Fan Ni <fan.ni@samsung.com>
> >
> > The function unmap_hugepage_range() has two kinds of users:
> > 1) unmap_ref_private(), which passes in the head page of a folio. Since
> >    unmap_ref_private() already takes a folio and has no other use for
> >    the derived page, it is natural for unmap_hugepage_range() to take
> >    a folio as well.
> > 2) All other users, which pass in a NULL pointer.
> >
> > In both cases, we can pass in a folio. Refactor unmap_hugepage_range()
> > to take a folio.
>
> It looks like unmap_ref_private() is the only caller that cares about
> passing a particular folio to unmap_hugepage_range(). Is there any
> reason we shouldn't drop the folio argument and call
> __unmap_hugepage_range() directly?

afaict there was no response to this review comment. I'll proceed with
the patchset, but please let's not lose sight of this.
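
If the argument were dropped as suggested, the declaration would shrink
to something like this (hypothetical, following the style of the
hugetlb.h hunk in the patch):

	void unmap_hugepage_range(struct vm_area_struct *,
				  unsigned long start, unsigned long end,
				  zap_flags_t);

with unmap_ref_private() calling __unmap_hugepage_range() directly.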