[PATCH] mm: khugepaged: decouple SHMEM and file folios' collapse

Baolin Wang posted 1 patch 7 months, 1 week ago
include/linux/khugepaged.h |  8 --------
mm/Kconfig                 |  2 +-
mm/khugepaged.c            | 13 ++-----------
3 files changed, 3 insertions(+), 20 deletions(-)
[PATCH] mm: khugepaged: decouple SHMEM and file folios' collapse
Posted by Baolin Wang 7 months, 1 week ago
Originally, file page collapse was intended for tmpfs/shmem, to merge pages
into THPs in the background. However, now not only tmpfs/shmem can support
large folios; some other file systems (such as XFS, erofs, ...) also
support large folios. Therefore, it is time to decouple support for the
collapse of file folios from SHMEM.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
---
Changes from RFC:
 - Add acked tag from David. Thanks.
 - Remove CONFIG_SHMEM in khugepaged.h file.
---
 include/linux/khugepaged.h |  8 --------
 mm/Kconfig                 |  2 +-
 mm/khugepaged.c            | 13 ++-----------
 3 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 1f46046080f5..b8d69cfbb58b 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -15,16 +15,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
 				 unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
-#ifdef CONFIG_SHMEM
 extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 				   bool install_pmd);
-#else
-static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
-					  unsigned long addr, bool install_pmd)
-{
-	return 0;
-}
-#endif
 
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
diff --git a/mm/Kconfig b/mm/Kconfig
index d4fd40f56178..79237842f7e2 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -898,7 +898,7 @@ config THP_SWAP
 
 config READ_ONLY_THP_FOR_FS
 	bool "Read-only THP for filesystems (EXPERIMENTAL)"
-	depends on TRANSPARENT_HUGEPAGE && SHMEM
+	depends on TRANSPARENT_HUGEPAGE
 
 	help
 	  Allow khugepaged to put read-only file-backed pages in THP.
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ebcd7c8a4b44..cdf5a581368b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1464,7 +1464,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 	}
 }
 
-#ifdef CONFIG_SHMEM
 /* folio must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmdp, struct folio *folio, struct page *page)
@@ -2353,14 +2352,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 	trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
 	return result;
 }
-#else
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-				    struct file *file, pgoff_t start,
-				    struct collapse_control *cc)
-{
-	BUILD_BUG();
-}
-#endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 					    struct collapse_control *cc)
@@ -2436,7 +2427,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
-			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+			if (!vma_is_anonymous(vma)) {
 				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
@@ -2782,7 +2773,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		mmap_assert_locked(mm);
 		memset(cc->node_load, 0, sizeof(cc->node_load));
 		nodes_clear(cc->alloc_nmask);
-		if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+		if (!vma_is_anonymous(vma)) {
 			struct file *file = get_file(vma->vm_file);
 			pgoff_t pgoff = linear_page_index(vma, addr);
 
-- 
2.43.5
Re: [PATCH] mm: khugepaged: decouple SHMEM and file folios' collapse
Posted by Zi Yan 7 months, 1 week ago
On 13 May 2025, at 2:56, Baolin Wang wrote:

> Originally, file page collapse was intended for tmpfs/shmem, to merge pages
> into THPs in the background. However, now not only tmpfs/shmem can support
> large folios; some other file systems (such as XFS, erofs, ...) also
> support large folios. Therefore, it is time to decouple support for the
> collapse of file folios from SHMEM.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Acked-by: David Hildenbrand <david@redhat.com>
> ---
> Changes from RFC:
>  - Add acked tag from David. Thanks.
>  - Remove CONFIG_SHMEM in khugepaged.h file.
> ---
>  include/linux/khugepaged.h |  8 --------
>  mm/Kconfig                 |  2 +-
>  mm/khugepaged.c            | 13 ++-----------
>  3 files changed, 3 insertions(+), 20 deletions(-)

+Song, the author of READ_ONLY_THP_FOR_FS.

The changes look good to me. Acked-by: Zi Yan <ziy@nvidia.com>

>
> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> index 1f46046080f5..b8d69cfbb58b 100644
> --- a/include/linux/khugepaged.h
> +++ b/include/linux/khugepaged.h
> @@ -15,16 +15,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
>  				 unsigned long vm_flags);
>  extern void khugepaged_min_free_kbytes_update(void);
>  extern bool current_is_khugepaged(void);
> -#ifdef CONFIG_SHMEM
>  extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
>  				   bool install_pmd);
> -#else
> -static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
> -					  unsigned long addr, bool install_pmd)
> -{
> -	return 0;
> -}
> -#endif
>
>  static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
>  {
> diff --git a/mm/Kconfig b/mm/Kconfig
> index d4fd40f56178..79237842f7e2 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -898,7 +898,7 @@ config THP_SWAP
>
>  config READ_ONLY_THP_FOR_FS
>  	bool "Read-only THP for filesystems (EXPERIMENTAL)"
> -	depends on TRANSPARENT_HUGEPAGE && SHMEM
> +	depends on TRANSPARENT_HUGEPAGE
>
>  	help
>  	  Allow khugepaged to put read-only file-backed pages in THP.
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index ebcd7c8a4b44..cdf5a581368b 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1464,7 +1464,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
>  	}
>  }
>
> -#ifdef CONFIG_SHMEM
>  /* folio must be locked, and mmap_lock must be held */
>  static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
>  			pmd_t *pmdp, struct folio *folio, struct page *page)
> @@ -2353,14 +2352,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>  	trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
>  	return result;
>  }
> -#else
> -static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> -				    struct file *file, pgoff_t start,
> -				    struct collapse_control *cc)
> -{
> -	BUILD_BUG();
> -}
> -#endif
>
>  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  					    struct collapse_control *cc)
> @@ -2436,7 +2427,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  			VM_BUG_ON(khugepaged_scan.address < hstart ||
>  				  khugepaged_scan.address + HPAGE_PMD_SIZE >
>  				  hend);
> -			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
> +			if (!vma_is_anonymous(vma)) {
>  				struct file *file = get_file(vma->vm_file);
>  				pgoff_t pgoff = linear_page_index(vma,
>  						khugepaged_scan.address);
> @@ -2782,7 +2773,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>  		mmap_assert_locked(mm);
>  		memset(cc->node_load, 0, sizeof(cc->node_load));
>  		nodes_clear(cc->alloc_nmask);
> -		if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
> +		if (!vma_is_anonymous(vma)) {
>  			struct file *file = get_file(vma->vm_file);
>  			pgoff_t pgoff = linear_page_index(vma, addr);
>
> -- 
> 2.43.5


Best Regards,
Yan, Zi