Currently, each PMD scan always increases `progress` by HPAGE_PMD_NR,
even if only scanning a single page. By counting the actual number of
pages scanned, the `progress` is tracked accurately.
Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
---
mm/khugepaged.c | 31 +++++++++++++++++++++++--------
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f99f61689f8..4b124e854e2e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1247,7 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start_addr, bool *mmap_locked,
- struct collapse_control *cc)
+ int *progress, struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
@@ -1258,23 +1258,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
unsigned long addr;
spinlock_t *ptl;
int node = NUMA_NO_NODE, unmapped = 0;
+ int _progress = 0;
VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
- if (result != SCAN_SUCCEED)
+ if (result != SCAN_SUCCEED) {
+ _progress = HPAGE_PMD_NR;
goto out;
+ }
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
if (!pte) {
+ _progress = HPAGE_PMD_NR;
result = SCAN_NO_PTE_TABLE;
goto out;
}
for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, addr += PAGE_SIZE) {
+ _progress++;
pte_t pteval = ptep_get(_pte);
if (pte_none_or_zero(pteval)) {
++none_or_zero;
@@ -1410,6 +1415,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
*mmap_locked = false;
}
out:
+ if (progress)
+ *progress += _progress;
+
trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
none_or_zero, result, unmapped);
return result;
@@ -2287,7 +2295,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
struct file *file, pgoff_t start,
- struct collapse_control *cc)
+ int *progress, struct collapse_control *cc)
{
struct folio *folio = NULL;
struct address_space *mapping = file->f_mapping;
@@ -2295,6 +2303,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
int present, swap;
int node = NUMA_NO_NODE;
int result = SCAN_SUCCEED;
+ int _progress = 0;
present = 0;
swap = 0;
@@ -2327,6 +2336,8 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue;
}
+ _progress += folio_nr_pages(folio);
+
if (folio_order(folio) == HPAGE_PMD_ORDER &&
folio->index == start) {
/* Maybe PMD-mapped */
@@ -2388,6 +2399,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
}
+ if (progress)
+ *progress += _progress;
+
trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
return result;
}
@@ -2470,7 +2484,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
mmap_read_unlock(mm);
mmap_locked = false;
*result = hpage_collapse_scan_file(mm,
- khugepaged_scan.address, file, pgoff, cc);
+ khugepaged_scan.address, file, pgoff,
+ &progress, cc);
fput(file);
if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
mmap_read_lock(mm);
@@ -2484,7 +2499,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
}
} else {
*result = hpage_collapse_scan_pmd(mm, vma,
- khugepaged_scan.address, &mmap_locked, cc);
+ khugepaged_scan.address, &mmap_locked,
+ &progress, cc);
}
if (*result == SCAN_SUCCEED)
@@ -2492,7 +2508,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
- progress += HPAGE_PMD_NR;
if (!mmap_locked)
/*
* We released mmap_lock so break loop. Note
@@ -2810,11 +2825,11 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
mmap_read_unlock(mm);
mmap_locked = false;
result = hpage_collapse_scan_file(mm, addr, file, pgoff,
- cc);
+ NULL, cc);
fput(file);
} else {
result = hpage_collapse_scan_pmd(mm, vma, addr,
- &mmap_locked, cc);
+ &mmap_locked, NULL, cc);
}
if (!mmap_locked)
*lock_dropped = true;
--
2.51.0
On 1/4/26 06:41, Vernon Yang wrote:
> Currently, each PMD scan always increases `progress` by HPAGE_PMD_NR,
> even if only scanning a single page. By counting the actual number of
"... a single pmd" ?
> pages scanned, the `progress` is tracked accurately.
"page table entries / pages scanned" ?
>
> Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
> ---
> mm/khugepaged.c | 31 +++++++++++++++++++++++--------
> 1 file changed, 23 insertions(+), 8 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 9f99f61689f8..4b124e854e2e 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1247,7 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> struct vm_area_struct *vma,
> unsigned long start_addr, bool *mmap_locked,
> - struct collapse_control *cc)
> + int *progress, struct collapse_control *cc)
> {
> pmd_t *pmd;
> pte_t *pte, *_pte;
> @@ -1258,23 +1258,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> unsigned long addr;
> spinlock_t *ptl;
> int node = NUMA_NO_NODE, unmapped = 0;
> + int _progress = 0;
"cur_progress" ?
>
> VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
>
> result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
> - if (result != SCAN_SUCCEED)
> + if (result != SCAN_SUCCEED) {
> + _progress = HPAGE_PMD_NR;
> goto out;
> + }
>
> memset(cc->node_load, 0, sizeof(cc->node_load));
> nodes_clear(cc->alloc_nmask);
> pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
> if (!pte) {
> + _progress = HPAGE_PMD_NR;
> result = SCAN_NO_PTE_TABLE;
> goto out;
> }
>
> for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
> _pte++, addr += PAGE_SIZE) {
> + _progress++;
> pte_t pteval = ptep_get(_pte);
> if (pte_none_or_zero(pteval)) {
> ++none_or_zero;
> @@ -1410,6 +1415,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> *mmap_locked = false;
> }
> out:
> + if (progress)
> + *progress += _progress;
> +
> trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
> none_or_zero, result, unmapped);
> return result;
> @@ -2287,7 +2295,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>
> static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> struct file *file, pgoff_t start,
> - struct collapse_control *cc)
> + int *progress, struct collapse_control *cc)
> {
> struct folio *folio = NULL;
> struct address_space *mapping = file->f_mapping;
> @@ -2295,6 +2303,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> int present, swap;
> int node = NUMA_NO_NODE;
> int result = SCAN_SUCCEED;
> + int _progress = 0;
Same here.
Not sure if it would be cleaner to just let the parent increment its
counter and returning instead the "cur_progress" from the function.
--
Cheers
David
On Mon, Jan 05, 2026 at 05:49:22PM +0100, David Hildenbrand (Red Hat) wrote:
> On 1/4/26 06:41, Vernon Yang wrote:
> > Currently, each PMD scan always increases `progress` by HPAGE_PMD_NR,
> > even if only scanning a single page. By counting the actual number of
>
> "... a single pmd" ?
>
> > pages scanned, the `progress` is tracked accurately.
>
> "page table entries / pages scanned" ?
Here a "single page" means a PTE-mapped 4KB page only. This patch does not
change the original semantics of "progress"; it simply replaces the fixed
HPAGE_PMD_NR increment with the exact number of PTEs scanned.
Let me provide a detailed example:
static int hpage_collapse_scan_pmd()
{
for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, addr += PAGE_SIZE) {
_progress++;
pte_t pteval = ptep_get(_pte);
...
if (pte_uffd_wp(pteval)) { <-- first scan hit
result = SCAN_PTE_UFFD_WP;
goto out_unmap;
}
}
}
During the first scan, if pte_uffd_wp(pteval) is true, the loop exits
directly. In practice, only one PTE is scanned before termination.
Here "progress += 1" reflects the actual number of PTEs scanned, whereas
previously "progress" was always incremented by HPAGE_PMD_NR.
As previously discussed, skipping SCAN_PMD_MAPPED and SCAN_NO_PTE_TABLE is
handled in Patch #3, not this Patch #2.
> >
> > Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
> > ---
> > mm/khugepaged.c | 31 +++++++++++++++++++++++--------
> > 1 file changed, 23 insertions(+), 8 deletions(-)
> >
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index 9f99f61689f8..4b124e854e2e 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -1247,7 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> > static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> > struct vm_area_struct *vma,
> > unsigned long start_addr, bool *mmap_locked,
> > - struct collapse_control *cc)
> > + int *progress, struct collapse_control *cc)
> > {
> > pmd_t *pmd;
> > pte_t *pte, *_pte;
> > @@ -1258,23 +1258,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> > unsigned long addr;
> > spinlock_t *ptl;
> > int node = NUMA_NO_NODE, unmapped = 0;
> > + int _progress = 0;
>
> "cur_progress" ?
Yes.
> > VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
> > result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
> > - if (result != SCAN_SUCCEED)
> > + if (result != SCAN_SUCCEED) {
> > + _progress = HPAGE_PMD_NR;
> > goto out;
> > + }
> > memset(cc->node_load, 0, sizeof(cc->node_load));
> > nodes_clear(cc->alloc_nmask);
> > pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
> > if (!pte) {
> > + _progress = HPAGE_PMD_NR;
> > result = SCAN_NO_PTE_TABLE;
> > goto out;
> > }
> > for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
> > _pte++, addr += PAGE_SIZE) {
> > + _progress++;
> > pte_t pteval = ptep_get(_pte);
> > if (pte_none_or_zero(pteval)) {
> > ++none_or_zero;
> > @@ -1410,6 +1415,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> > *mmap_locked = false;
> > }
> > out:
> > + if (progress)
> > + *progress += _progress;
> > +
> > trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
> > none_or_zero, result, unmapped);
> > return result;
> > @@ -2287,7 +2295,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
> > static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> > struct file *file, pgoff_t start,
> > - struct collapse_control *cc)
> > + int *progress, struct collapse_control *cc)
> > {
> > struct folio *folio = NULL;
> > struct address_space *mapping = file->f_mapping;
> > @@ -2295,6 +2303,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> > int present, swap;
> > int node = NUMA_NO_NODE;
> > int result = SCAN_SUCCEED;
> > + int _progress = 0;
>
> Same here.
>
>
> Not sure if it would be cleaner to just let the parent increment its counter
> and returning instead the "cur_progress" from the function.
Both are good for me, I have implemented one version as follows, please
see if it is cleaner.
--
Thanks,
Vernon
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f99f61689f8..4cf24553c2bd 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1247,6 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start_addr, bool *mmap_locked,
+ int *cur_progress,
struct collapse_control *cc)
{
pmd_t *pmd;
@@ -1262,19 +1263,27 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
- if (result != SCAN_SUCCEED)
+ if (result != SCAN_SUCCEED) {
+ if (cur_progress)
+ *cur_progress = HPAGE_PMD_NR;
goto out;
+ }
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
if (!pte) {
+ if (cur_progress)
+ *cur_progress = HPAGE_PMD_NR;
result = SCAN_NO_PTE_TABLE;
goto out;
}
for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, addr += PAGE_SIZE) {
+ if (cur_progress)
+ *cur_progress += 1;
+
pte_t pteval = ptep_get(_pte);
if (pte_none_or_zero(pteval)) {
++none_or_zero;
@@ -2287,6 +2296,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
struct file *file, pgoff_t start,
+ int *cur_progress,
struct collapse_control *cc)
{
struct folio *folio = NULL;
@@ -2327,6 +2337,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue;
}
+ if (cur_progress)
+ *cur_progress += folio_nr_pages(folio);
+
if (folio_order(folio) == HPAGE_PMD_ORDER &&
folio->index == start) {
/* Maybe PMD-mapped */
@@ -2454,6 +2467,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
while (khugepaged_scan.address < hend) {
bool mmap_locked = true;
+ int cur_progress = 0;
cond_resched();
if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
@@ -2470,7 +2484,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
mmap_read_unlock(mm);
mmap_locked = false;
*result = hpage_collapse_scan_file(mm,
- khugepaged_scan.address, file, pgoff, cc);
+ khugepaged_scan.address, file, pgoff,
+ &cur_progress, cc);
fput(file);
if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
mmap_read_lock(mm);
@@ -2484,7 +2499,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
}
} else {
*result = hpage_collapse_scan_pmd(mm, vma,
- khugepaged_scan.address, &mmap_locked, cc);
+ khugepaged_scan.address, &mmap_locked,
+ &cur_progress, cc);
}
if (*result == SCAN_SUCCEED)
@@ -2492,7 +2508,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
- progress += HPAGE_PMD_NR;
+ progress += cur_progress;
if (!mmap_locked)
/*
* We released mmap_lock so break loop. Note
@@ -2810,11 +2826,11 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
mmap_read_unlock(mm);
mmap_locked = false;
result = hpage_collapse_scan_file(mm, addr, file, pgoff,
- cc);
+ NULL, cc);
fput(file);
} else {
result = hpage_collapse_scan_pmd(mm, vma, addr,
- &mmap_locked, cc);
+ &mmap_locked, NULL, cc);
}
if (!mmap_locked)
*lock_dropped = true;
On 1/6/26 06:55, Vernon Yang wrote:
> On Mon, Jan 05, 2026 at 05:49:22PM +0100, David Hildenbrand (Red Hat) wrote:
>> On 1/4/26 06:41, Vernon Yang wrote:
>>> Currently, each PMD scan always increases `progress` by HPAGE_PMD_NR,
>>> even if only scanning a single page. By counting the actual number of
>>
>> "... a single pmd" ?
>>
>>> pages scanned, the `progress` is tracked accurately.
>>
>> "page table entries / pages scanned" ?
>
> The single page is pte-4KB only. This patch does not change the original
> semantics of "progress", it simply uses the exact number of PTEs counted
> to replace HPAGE_PMD_NR.
You used the right terminology: "PTEs" counted.
It could be either a page or a PTE, depending on whether we collapse an
anon THP or a file THP.
You make it sound like we always scan pages.
[...]
>
>>>
>>> Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
>>> ---
>>> mm/khugepaged.c | 31 +++++++++++++++++++++++--------
>>> 1 file changed, 23 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>>> index 9f99f61689f8..4b124e854e2e 100644
>>> --- a/mm/khugepaged.c
>>> +++ b/mm/khugepaged.c
>>> @@ -1247,7 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>>> static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>>> struct vm_area_struct *vma,
>>> unsigned long start_addr, bool *mmap_locked,
>>> - struct collapse_control *cc)
>>> + int *progress, struct collapse_control *cc)
>>> {
>>> pmd_t *pmd;
>>> pte_t *pte, *_pte;
>>> @@ -1258,23 +1258,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>>> unsigned long addr;
>>> spinlock_t *ptl;
>>> int node = NUMA_NO_NODE, unmapped = 0;
>>> + int _progress = 0;
>>
>> "cur_progress" ?
>
> Yes.
>
>>> VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
>>> result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
>>> - if (result != SCAN_SUCCEED)
>>> + if (result != SCAN_SUCCEED) {
>>> + _progress = HPAGE_PMD_NR;
>>> goto out;
>>> + }
>>> memset(cc->node_load, 0, sizeof(cc->node_load));
>>> nodes_clear(cc->alloc_nmask);
>>> pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
>>> if (!pte) {
>>> + _progress = HPAGE_PMD_NR;
>>> result = SCAN_NO_PTE_TABLE;
>>> goto out;
>>> }
>>> for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
>>> _pte++, addr += PAGE_SIZE) {
>>> + _progress++;
>>> pte_t pteval = ptep_get(_pte);
>>> if (pte_none_or_zero(pteval)) {
>>> ++none_or_zero;
>>> @@ -1410,6 +1415,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>>> *mmap_locked = false;
>>> }
>>> out:
>>> + if (progress)
>>> + *progress += _progress;
>>> +
>>> trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
>>> none_or_zero, result, unmapped);
>>> return result;
>>> @@ -2287,7 +2295,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>>> static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>>> struct file *file, pgoff_t start,
>>> - struct collapse_control *cc)
>>> + int *progress, struct collapse_control *cc)
>>> {
>>> struct folio *folio = NULL;
>>> struct address_space *mapping = file->f_mapping;
>>> @@ -2295,6 +2303,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>>> int present, swap;
>>> int node = NUMA_NO_NODE;
>>> int result = SCAN_SUCCEED;
>>> + int _progress = 0;
>>
>> Same here.
>>
>>
>> Not sure if it would be cleaner to just let the parent increment its counter
>> and returning instead the "cur_progress" from the function.
>
> Both are good for me, I have implemented one version as follows, please
> see if it is cleaner.
From a quick glance, this looks good.
--
Cheers
David
© 2016 - 2026 Red Hat, Inc.