[PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix
Posted by Shivank Garg 2 weeks, 5 days ago
This series contains several cleanups for mm/khugepaged.c to improve code
readability and type safety, and one functional fix to ensure
khugepaged_scan_mm_slot() correctly counts small VMAs towards the
scan limit.
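
As an illustration of the accounting being fixed, here is a minimal
standalone C sketch -- not the kernel code: the constants, the struct vma
stand-in and the one-PMD-range-per-VMA loop are all simplifications of
what khugepaged_scan_mm_slot() actually does:

#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)	/* assume 2 MiB huge pages */
#define HPAGE_PMD_NR	512		/* 4 KiB base pages per huge page */

struct vma { unsigned long start, end; };

/* Caricature of khugepaged_scan_mm_slot()'s per-pass scan budget. */
static unsigned int scan_mm(const struct vma *vmas, int nr, unsigned int pages)
{
	unsigned int progress = 0;

	for (int i = 0; i < nr && progress < pages; i++) {
		unsigned long hstart = (vmas[i].start + HPAGE_PMD_SIZE - 1) &
				       ~(HPAGE_PMD_SIZE - 1);
		unsigned long hend = vmas[i].end & ~(HPAGE_PMD_SIZE - 1);

		if (hstart >= hend) {
			/*
			 * The VMA cannot hold a single aligned huge page.
			 * The fix: charge it to the scan limit anyway,
			 * instead of skipping it for free.
			 */
			progress++;
			continue;
		}
		progress += HPAGE_PMD_NR;	/* scanned one PMD-sized range */
	}
	return progress;
}

int main(void)
{
	const struct vma vmas[] = {
		{ 0x1000,   0x2000 },	/* tiny VMA: now costs 1 */
		{ 0x200000, 0x600000 },	/* large enough to scan */
	};

	printf("progress = %u\n", scan_mm(vmas, 2, 4096));
	return 0;
}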

To apply this series on mm-new, please drop:
- 20251215084615.5283-3-shivankg@amd.com:
  [PATCH V4 0/2] mm/khugepaged: fix dirty page handling for MADV_COLLAPSE
- 20251224111351.41042-4-shivankg@amd.com:
  [PATCH V2 0/5] mm/khugepaged: cleanups and scan limit fix

and apply:
- https://lore.kernel.org/all/20260118190939.8986-2-shivankg@amd.com
  [PATCH V5 0/2] mm/khugepaged: fix dirty page handling for MADV_COLLAPSE

Thanks,

v3:
- Fold mm-khugepaged-count-small-vmas-towards-scan-limit-fix: add comment (Lance)
- Remove extern and use two-tab indentation (David)

v2:
- https://lore.kernel.org/all/20251224111351.41042-4-shivankg@amd.com
- Added a fix for small VMAs not being counted in the scan limit (Wei)
- Updated 'progress' to 'unsigned int' to match types
- Updated return types of internal functions to use enum scan_result (Zi)
- Added a void wrapper collapse_pte_mapped_thp() for external callers to avoid
  exposing the internal enum (David)

v1:
- https://lore.kernel.org/linux-mm/20251216111139.95438-2-shivankg@amd.com

Shivank Garg (5):
  mm/khugepaged: remove unnecessary goto 'skip' label
  mm/khugepaged: count small VMAs towards scan limit
  mm/khugepaged: change collapse_pte_mapped_thp() to return void
  mm/khugepaged: use enum scan_result for result variables and return
    types
  mm/khugepaged: make khugepaged_collapse_control static
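
Patches 3 and 4 combine into the pattern visible in the diffs further
down: internal helpers return enum scan_result, and external callers get
a void wrapper so the enum never leaks out of mm/khugepaged.c. A minimal
standalone sketch of that shape (the function names, the alignment check
and the two-value enum here are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Abbreviated stand-in for the scan_result enum in mm/khugepaged.c. */
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
};

/* Internal helper: a typed result lets in-file callers branch on it. */
static enum scan_result try_collapse(unsigned long addr, bool install_pmd)
{
	if (!install_pmd || (addr & ((2UL << 20) - 1)))
		return SCAN_FAIL;	/* hypothetical: demand a PMD-aligned addr */
	return SCAN_SUCCEED;
}

/* External entry point: the result is an internal detail, so return void. */
void collapse_entry(unsigned long addr, bool install_pmd)
{
	try_collapse(addr, install_pmd);
}

int main(void)
{
	printf("internal: %d\n", try_collapse(2UL << 20, true));
	collapse_entry(2UL << 20, true);	/* outside callers ignore results */
	return 0;
}

This is also what the header change at the top of the diff reflects: only
the void wrapper is declared in include/linux/khugepaged.h.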

 include/linux/khugepaged.h |   9 +--
 mm/khugepaged.c            | 149 +++++++++++++++++++------------------
 2 files changed, 79 insertions(+), 79 deletions(-)

-- 
2.43.0
Re: [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix
Posted by Andrew Morton 2 weeks, 5 days ago
On Sun, 18 Jan 2026 19:22:51 +0000 Shivank Garg <shivankg@amd.com> wrote:

> This series contains several cleanups for mm/khugepaged.c to improve code
> readability and type safety, and one functional fix to ensure
> khugepaged_scan_mm_slot() correctly counts small VMAs towards the
> scan limit.
> 

That's a lot of changes to a well-reviewed 24-day-old patchset.

> 
> v3:
> - Fold mm-khugepaged-count-small-vmas-towards-scan-limit-fix: add comment (Lance)
> - Remove extern and use two-tab indentation (David)

Are you sure?  The v2->v3 diff is large.  A lot of (unchangelogged)
alterations from `int' to `enum scan_result'.

It all looks pretty simple/straightforward to me but again, can
reviewers please check this over fairly soonly, thanks.



--- a/include/linux/khugepaged.h~b
+++ a/include/linux/khugepaged.h
@@ -17,8 +17,8 @@ extern void khugepaged_enter_vma(struct
 				 vm_flags_t vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-				    bool install_pmd);
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+		bool install_pmd);
 
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
@@ -43,7 +43,7 @@ static inline void khugepaged_enter_vma(
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
-					   unsigned long addr, bool install_pmd)
+		unsigned long addr, bool install_pmd)
 {
 }
 
--- a/mm/khugepaged.c~b
+++ a/mm/khugepaged.c
@@ -537,17 +537,16 @@ static void release_pte_pages(pte_t *pte
 	}
 }
 
-static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-					unsigned long start_addr,
-					pte_t *pte,
-					struct collapse_control *cc,
-					struct list_head *compound_pagelist)
+static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
+		unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
+		struct list_head *compound_pagelist)
 {
 	struct page *page = NULL;
 	struct folio *folio = NULL;
 	unsigned long addr = start_addr;
 	pte_t *_pte;
-	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
+	int none_or_zero = 0, shared = 0, referenced = 0;
+	enum scan_result result = SCAN_FAIL;
 
 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, addr += PAGE_SIZE) {
@@ -780,13 +779,13 @@ static void __collapse_huge_page_copy_fa
  * @ptl: lock on raw pages' PTEs
  * @compound_pagelist: list that stores compound pages
  */
-static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
+static enum scan_result __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
 		pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
 		unsigned long address, spinlock_t *ptl,
 		struct list_head *compound_pagelist)
 {
 	unsigned int i;
-	int result = SCAN_SUCCEED;
+	enum scan_result result = SCAN_SUCCEED;
 
 	/*
 	 * Copying pages' contents is subject to memory poison at any iteration.
@@ -898,10 +897,8 @@ static int hpage_collapse_find_target_no
  * Returns enum scan_result value.
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
-				   bool expect_anon,
-				   struct vm_area_struct **vmap,
-				   struct collapse_control *cc)
+static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+		bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc)
 {
 	struct vm_area_struct *vma;
 	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
@@ -930,7 +927,7 @@ static int hugepage_vma_revalidate(struc
 	return SCAN_SUCCEED;
 }
 
-static inline int check_pmd_state(pmd_t *pmd)
+static inline enum scan_result check_pmd_state(pmd_t *pmd)
 {
 	pmd_t pmde = pmdp_get_lockless(pmd);
 
@@ -953,9 +950,8 @@ static inline int check_pmd_state(pmd_t
 	return SCAN_SUCCEED;
 }
 
-static int find_pmd_or_thp_or_none(struct mm_struct *mm,
-				   unsigned long address,
-				   pmd_t **pmd)
+static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
+		unsigned long address, pmd_t **pmd)
 {
 	*pmd = mm_find_pmd(mm, address);
 	if (!*pmd)
@@ -964,12 +960,11 @@ static int find_pmd_or_thp_or_none(struc
 	return check_pmd_state(*pmd);
 }
 
-static int check_pmd_still_valid(struct mm_struct *mm,
-				 unsigned long address,
-				 pmd_t *pmd)
+static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
+		unsigned long address, pmd_t *pmd)
 {
 	pmd_t *new_pmd;
-	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
+	enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
 
 	if (result != SCAN_SUCCEED)
 		return result;
@@ -985,15 +980,14 @@ static int check_pmd_still_valid(struct
  * Called and returns without pte mapped or spinlocks held.
  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
  */
-static int __collapse_huge_page_swapin(struct mm_struct *mm,
-				       struct vm_area_struct *vma,
-				       unsigned long start_addr, pmd_t *pmd,
-				       int referenced)
+static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
+		struct vm_area_struct *vma, unsigned long start_addr, pmd_t *pmd,
+		int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
 	unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
-	int result;
+	enum scan_result result;
 	pte_t *pte = NULL;
 	spinlock_t *ptl;
 
@@ -1062,8 +1056,8 @@ out:
 	return result;
 }
 
-static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
-			      struct collapse_control *cc)
+static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
+		struct collapse_control *cc)
 {
 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
 		     GFP_TRANSHUGE);
@@ -1090,9 +1084,8 @@ static int alloc_charge_folio(struct fol
 	return SCAN_SUCCEED;
 }
 
-static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
-			      int referenced, int unmapped,
-			      struct collapse_control *cc)
+static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
+		int referenced, int unmapped, struct collapse_control *cc)
 {
 	LIST_HEAD(compound_pagelist);
 	pmd_t *pmd, _pmd;
@@ -1100,7 +1093,7 @@ static int collapse_huge_page(struct mm_
 	pgtable_t pgtable;
 	struct folio *folio;
 	spinlock_t *pmd_ptl, *pte_ptl;
-	int result = SCAN_FAIL;
+	enum scan_result result = SCAN_FAIL;
 	struct vm_area_struct *vma;
 	struct mmu_notifier_range range;
 
@@ -1246,15 +1239,14 @@ out_nolock:
 	return result;
 }
 
-static int hpage_collapse_scan_pmd(struct mm_struct *mm,
-				   struct vm_area_struct *vma,
-				   unsigned long start_addr, bool *mmap_locked,
-				   struct collapse_control *cc)
+static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
+		struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
+		struct collapse_control *cc)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
-	int result = SCAN_FAIL, referenced = 0;
-	int none_or_zero = 0, shared = 0;
+	int none_or_zero = 0, shared = 0, referenced = 0;
+	enum scan_result result = SCAN_FAIL;
 	struct page *page = NULL;
 	struct folio *folio = NULL;
 	unsigned long addr;
@@ -1441,8 +1433,8 @@ static void collect_mm_slot(struct mm_sl
 }
 
 /* folio must be locked, and mmap_lock must be held */
-static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
-			pmd_t *pmdp, struct folio *folio, struct page *page)
+static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmdp, struct folio *folio, struct page *page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_fault vmf = {
@@ -1477,10 +1469,11 @@ static int set_huge_pmd(struct vm_area_s
 	return SCAN_SUCCEED;
 }
 
-static int try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-				       bool install_pmd)
+static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+		bool install_pmd)
 {
-	int nr_mapped_ptes = 0, result = SCAN_FAIL;
+	enum scan_result result = SCAN_FAIL;
+	int nr_mapped_ptes = 0;
 	unsigned int nr_batch_ptes;
 	struct mmu_notifier_range range;
 	bool notified = false;
@@ -1712,7 +1705,7 @@ drop_folio:
  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
  */
 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-			     bool install_pmd)
+		bool install_pmd)
 {
 	try_collapse_pte_mapped_thp(mm, addr, install_pmd);
 }
@@ -1862,9 +1855,8 @@ drop_pml:
  *    + unlock old pages
  *    + unlock and free huge page;
  */
-static int collapse_file(struct mm_struct *mm, unsigned long addr,
-			 struct file *file, pgoff_t start,
-			 struct collapse_control *cc)
+static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
+		struct file *file, pgoff_t start, struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *dst;
@@ -1872,7 +1864,8 @@ static int collapse_file(struct mm_struc
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
-	int nr_none = 0, result = SCAN_SUCCEED;
+	enum scan_result result = SCAN_SUCCEED;
+	int nr_none = 0;
 	bool is_shmem = shmem_file(file);
 
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
@@ -2293,16 +2286,15 @@ out:
 	return result;
 }
 
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-				    struct file *file, pgoff_t start,
-				    struct collapse_control *cc)
+static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
+		struct file *file, pgoff_t start, struct collapse_control *cc)
 {
 	struct folio *folio = NULL;
 	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
 	int node = NUMA_NO_NODE;
-	int result = SCAN_SUCCEED;
+	enum scan_result result = SCAN_SUCCEED;
 
 	present = 0;
 	swap = 0;
@@ -2400,7 +2392,7 @@ static int hpage_collapse_scan_file(stru
 	return result;
 }
 
-static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
+static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result *result,
 					    struct collapse_control *cc)
 	__releases(&khugepaged_mm_lock)
 	__acquires(&khugepaged_mm_lock)
@@ -2562,7 +2554,7 @@ static void khugepaged_do_scan(struct co
 	unsigned int progress = 0, pass_through_head = 0;
 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
 	bool wait = true;
-	int result = SCAN_SUCCEED;
+	enum scan_result result = SCAN_SUCCEED;
 
 	lru_add_drain_all();
 
@@ -2775,7 +2767,8 @@ int madvise_collapse(struct vm_area_stru
 	struct collapse_control *cc;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long hstart, hend, addr;
-	int thps = 0, last_fail = SCAN_FAIL;
+	enum scan_result last_fail = SCAN_FAIL;
+	int thps = 0;
 	bool mmap_locked = true;
 
 	BUG_ON(vma->vm_start > start);
@@ -2796,7 +2789,7 @@ int madvise_collapse(struct vm_area_stru
 	hend = end & HPAGE_PMD_MASK;
 
 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
-		int result = SCAN_FAIL;
+		enum scan_result result = SCAN_FAIL;
 		bool triggered_wb = false;
 
 retry:
_
Re: [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix
Posted by Garg, Shivank 2 weeks, 5 days ago

On 1/19/2026 2:04 AM, Andrew Morton wrote:
> On Sun, 18 Jan 2026 19:22:51 +0000 Shivank Garg <shivankg@amd.com> wrote:
> 
>> This series contains several cleanups for mm/khugepaged.c to improve code
>> readability and type safety, and one functional fix to ensure
>> khugepaged_scan_mm_slot() correctly counts small VMAs towards the
>> scan limit.
>>
> 
> That's a lot of changes to a well-reviewed 24-day-old patchset.
> 

Sincere apologies for the last-minute churn.

>>
>> v3:
>> - Fold mm-khugepaged-count-small-vmas-towards-scan-limit-fix: add comment (Lance)
>> - Remove extern and use two-tab indentation (David)
> 
> Are you sure?  The v2->v3 diff is large.  A lot of (unchangelogged)
> alterations from `int' to `enum scan_result'.
> 
> It all looks pretty simple/straightforward to me but again, can
> reviewers please check this over fairly soonly, thanks.
> 

The diff appears large because it somehow did not capture V2 Patch 4/5.

The correct V3 vs V2 diff:

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 37b992b22bba..d7a9053ff4fe 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -17,8 +17,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
 				 vm_flags_t vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-				    bool install_pmd);
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+		bool install_pmd);
 
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
@@ -43,7 +43,7 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
-					   unsigned long addr, bool install_pmd)
+		unsigned long addr, bool install_pmd)
 {
 }
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..fba6aea5bea6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -538,10 +538,8 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
-						     unsigned long start_addr,
-						     pte_t *pte,
-						     struct collapse_control *cc,
-						     struct list_head *compound_pagelist)
+		unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
+		struct list_head *compound_pagelist)
 {
 	struct page *page = NULL;
 	struct folio *folio = NULL;
@@ -900,8 +898,7 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
  */
 
 static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
-						bool expect_anon, struct vm_area_struct **vmap,
-						struct collapse_control *cc)
+		bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc)
 {
 	struct vm_area_struct *vma;
 	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
@@ -954,8 +951,7 @@ static inline enum scan_result check_pmd_state(pmd_t *pmd)
 }
 
 static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
-						unsigned long address,
-						pmd_t **pmd)
+		unsigned long address, pmd_t **pmd)
 {
 	*pmd = mm_find_pmd(mm, address);
 	if (!*pmd)
@@ -965,8 +961,7 @@ static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
 }
 
 static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
-					      unsigned long address,
-					      pmd_t *pmd)
+		unsigned long address, pmd_t *pmd)
 {
 	pmd_t *new_pmd;
 	enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
@@ -986,9 +981,8 @@ static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
  */
 static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
-						    struct vm_area_struct *vma,
-						    unsigned long start_addr, pmd_t *pmd,
-						    int referenced)
+		struct vm_area_struct *vma, unsigned long start_addr, pmd_t *pmd,
+		int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
@@ -1063,7 +1057,7 @@ static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
 }
 
 static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
-					   struct collapse_control *cc)
+		struct collapse_control *cc)
 {
 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
 		     GFP_TRANSHUGE);
@@ -1091,8 +1085,7 @@ static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_stru
 }
 
 static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
-					   int referenced, int unmapped,
-					   struct collapse_control *cc)
+		int referenced, int unmapped, struct collapse_control *cc)
 {
 	LIST_HEAD(compound_pagelist);
 	pmd_t *pmd, _pmd;
@@ -1247,9 +1240,8 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
 }
 
 static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
-						struct vm_area_struct *vma,
-						unsigned long start_addr, bool *mmap_locked,
-						struct collapse_control *cc)
+		struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
+		struct collapse_control *cc)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
@@ -1442,7 +1434,7 @@ static void collect_mm_slot(struct mm_slot *slot)
 
 /* folio must be locked, and mmap_lock must be held */
 static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
-				     pmd_t *pmdp, struct folio *folio, struct page *page)
+		pmd_t *pmdp, struct folio *folio, struct page *page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_fault vmf = {
@@ -1478,7 +1470,7 @@ static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long a
 }
 
 static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-						    bool install_pmd)
+		bool install_pmd)
 {
 	enum scan_result result = SCAN_FAIL;
 	int nr_mapped_ptes = 0;
@@ -1713,7 +1705,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
  */
 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-			     bool install_pmd)
+		bool install_pmd)
 {
 	try_collapse_pte_mapped_thp(mm, addr, install_pmd);
 }
@@ -1864,8 +1856,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + unlock and free huge page;
  */
 static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
-				      struct file *file, pgoff_t start,
-				      struct collapse_control *cc)
+		struct file *file, pgoff_t start, struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *dst;
@@ -2296,8 +2287,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
 }
 
 static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-						 struct file *file, pgoff_t start,
-						 struct collapse_control *cc)
+		struct file *file, pgoff_t start, struct collapse_control *cc)
 {
 	struct folio *folio = NULL;
 	struct address_space *mapping = file->f_mapping;
Re: [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix
Posted by Zi Yan 2 weeks, 5 days ago
On 18 Jan 2026, at 15:34, Andrew Morton wrote:

> On Sun, 18 Jan 2026 19:22:51 +0000 Shivank Garg <shivankg@amd.com> wrote:
>
>> This series contains several cleanups for mm/khugepaged.c to improve code
>> readability and type safety, and one functional fix to ensure
>> khugepaged_scan_mm_slot() correctly counts small VMAs towards the
>> scan limit.
>>
>
> That's a lot of changes to a well-reviewed 24-day-old patchset.
>
>>
>> v3:
>> - Fold mm-khugepaged-count-small-vmas-towards-scan-limit-fix: add comment (Lance)
>> - Remove extern and use two-tab indentation (David)
>
> Are you sure?  The v2->v3 diff is large.  A lot of (unchangelogged)
> alterations from `int' to `enum scan_result'.

V2 has this change[1].


[1] https://lore.kernel.org/all/20251224111351.41042-12-shivankg@amd.com/

>
> It all looks pretty simple/straightforward to me but again, can
> reviewers please check this over fairly soonly, thanks.

I reviewed it when it was out last year.

--
Best Regards,
Yan, Zi