In the current mremap_folio_pte_batch(): 1) pte_batch_hint() always
returns a single PTE on non-ARM64 machines, which is not efficient.
2) It needs to look up a folio just to call folio_pte_batch().

With the newly added can_pte_batch_count(), call it instead of
folio_pte_batch(), and rename mremap_folio_pte_batch() to
mremap_pte_batch().

Signed-off-by: Zhang Qilong <zhangqilong3@huawei.com>
---
mm/mremap.c | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index bd7314898ec5..d11f93f1622f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -169,27 +169,17 @@ static pte_t move_soft_dirty_pte(pte_t pte)
pte = pte_swp_mksoft_dirty(pte);
#endif
return pte;
}

-static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
+static int mremap_pte_batch(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte, int max_nr)
{
- struct folio *folio;
-
if (max_nr == 1)
return 1;

- /* Avoid expensive folio lookup if we stand no chance of benefit. */
- if (pte_batch_hint(ptep, pte) == 1)
- return 1;
-
- folio = vm_normal_folio(vma, addr, pte);
- if (!folio || !folio_test_large(folio))
- return 1;
-
- return folio_pte_batch(folio, ptep, pte, max_nr);
+ return can_pte_batch_count(vma, ptep, &pte, max_nr, 0);
}

static int move_ptes(struct pagetable_move_control *pmc,
unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
@@ -278,11 +268,11 @@ static int move_ptes(struct pagetable_move_control *pmc,
* make sure the physical page stays valid until
* the TLB entry for the old mapping has been
* flushed.
*/
if (pte_present(old_pte)) {
- nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
+ nr_ptes = mremap_pte_batch(vma, old_addr, old_ptep,
old_pte, max_nr_ptes);
force_flush = true;
}
pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
pte = move_pte(pte, old_addr, new_addr);
--
2.43.0

On Mon, Oct 27, 2025 at 10:03:15PM +0800, Zhang Qilong wrote:
> In the current mremap_folio_pte_batch(): 1) pte_batch_hint() always
> returns a single PTE on non-ARM64 machines, which is not efficient.
Err... but there's basically no benefit for non-arm64 machines?
The key benefit is the mTHP side of things and making the underlying
arch-specific code more efficient, right?
And again, you need numbers to demonstrate you don't regress non-arm64.
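For context, the generic fallback is roughly the following (paraphrased
from include/linux/pgtable.h; arm64's contpte support is the notable
override), which is why the hint alone can never enable batching
elsewhere:

/*
 * Approximate generic fallback: without an arch override, the hint
 * is always a single PTE, so pte_batch_hint() == 1 doubles as a
 * cheap "no batching possible via the hint" early exit.
 */
#ifndef pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}
#endif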
> 2) It needs to look up a folio just to call folio_pte_batch().
>
> With the newly added can_pte_batch_count(), call it instead of
> folio_pte_batch(), and rename mremap_folio_pte_batch() to
> mremap_pte_batch().
>
> Signed-off-by: Zhang Qilong <zhangqilong3@huawei.com>
> ---
> mm/mremap.c | 16 +++-------------
> 1 file changed, 3 insertions(+), 13 deletions(-)
>
> diff --git a/mm/mremap.c b/mm/mremap.c
> index bd7314898ec5..d11f93f1622f 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -169,27 +169,17 @@ static pte_t move_soft_dirty_pte(pte_t pte)
> pte = pte_swp_mksoft_dirty(pte);
> #endif
> return pte;
> }
>
> -static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
> +static int mremap_pte_batch(struct vm_area_struct *vma, unsigned long addr,
> pte_t *ptep, pte_t pte, int max_nr)
> {
> - struct folio *folio;
> -
> if (max_nr == 1)
> return 1;
>
> - /* Avoid expensive folio lookup if we stand no chance of benefit. */
> - if (pte_batch_hint(ptep, pte) == 1)
> - return 1;
Why are we eliminating an easy exit here and instead always invoking the
more involved function?
Again, this has to be tested on non-arm64 architectures.
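A minimal sketch of keeping that exit on top of this patch (assuming
the can_pte_batch_count() signature from earlier in this series):

static int mremap_pte_batch(struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t pte, int max_nr)
{
	if (max_nr == 1)
		return 1;

	/*
	 * Cheap arch hint first: on arches without contpte-style
	 * hints this returns 1, preserving the old single-PTE
	 * behaviour instead of always paying for the batching walk.
	 */
	if (pte_batch_hint(ptep, pte) == 1)
		return 1;

	return can_pte_batch_count(vma, ptep, &pte, max_nr, 0);
}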
> -
> - folio = vm_normal_folio(vma, addr, pte);
> - if (!folio || !folio_test_large(folio))
> - return 1;
> -
> - return folio_pte_batch(folio, ptep, pte, max_nr);
> + return can_pte_batch_count(vma, ptep, &pte, max_nr, 0);
It's very silly to have this function now just return a call to another
function plus a trivial check that your function should be doing itself...
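If can_pte_batch_count() did the max_nr == 1 check itself, the wrapper
could go away entirely. The call site in move_ptes() might then look
like this sketch (batch_pte is a hypothetical local; note the copy, so
any bits the helper merges into the PTE it is handed do not leak into
old_pte):

if (pte_present(old_pte)) {
	/* Pass a copy, as the removed wrapper effectively did. */
	pte_t batch_pte = old_pte;

	nr_ptes = can_pte_batch_count(vma, old_ptep, &batch_pte,
				      max_nr_ptes, 0);
	force_flush = true;
}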
> }
>
> static int move_ptes(struct pagetable_move_control *pmc,
> unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
> {
> @@ -278,11 +268,11 @@ static int move_ptes(struct pagetable_move_control *pmc,
> * make sure the physical page stays valid until
> * the TLB entry for the old mapping has been
> * flushed.
> */
> if (pte_present(old_pte)) {
> - nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
> + nr_ptes = mremap_pte_batch(vma, old_addr, old_ptep,
> old_pte, max_nr_ptes);
> force_flush = true;
> }
> pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
> pte = move_pte(pte, old_addr, new_addr);
> --
> 2.43.0
>

On 27.10.25 15:03, Zhang Qilong wrote:
> In the current mremap_folio_pte_batch(): 1) pte_batch_hint() always
> returns a single PTE on non-ARM64 machines, which is not efficient.
> 2) It needs to look up a folio just to call folio_pte_batch().
>
> With the newly added can_pte_batch_count(), call it instead of
> folio_pte_batch(), and rename mremap_folio_pte_batch() to
> mremap_pte_batch().
>
> Signed-off-by: Zhang Qilong <zhangqilong3@huawei.com>
> ---
> mm/mremap.c | 16 +++-------------
> 1 file changed, 3 insertions(+), 13 deletions(-)
>
> diff --git a/mm/mremap.c b/mm/mremap.c
> index bd7314898ec5..d11f93f1622f 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -169,27 +169,17 @@ static pte_t move_soft_dirty_pte(pte_t pte)
> pte = pte_swp_mksoft_dirty(pte);
> #endif
> return pte;
> }
>
> -static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
> +static int mremap_pte_batch(struct vm_area_struct *vma, unsigned long addr,
> pte_t *ptep, pte_t pte, int max_nr)
> {
> - struct folio *folio;
> -
> if (max_nr == 1)
> return 1;
>
> - /* Avoid expensive folio lookup if we stand no chance of benefit. */
> - if (pte_batch_hint(ptep, pte) == 1)
> - return 1;
> -
> - folio = vm_normal_folio(vma, addr, pte);
> - if (!folio || !folio_test_large(folio))
> - return 1;
> -
> - return folio_pte_batch(folio, ptep, pte, max_nr);
> + return can_pte_batch_count(vma, ptep, &pte, max_nr, 0);
> }
>
> static int move_ptes(struct pagetable_move_control *pmc,
> unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
> {
> @@ -278,11 +268,11 @@ static int move_ptes(struct pagetable_move_control *pmc,
> * make sure the physical page stays valid until
> * the TLB entry for the old mapping has been
> * flushed.
> */
> if (pte_present(old_pte)) {
> - nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
> + nr_ptes = mremap_pte_batch(vma, old_addr, old_ptep,
> old_pte, max_nr_ptes);
> force_flush = true;
> }
> pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
get_and_clear_ptes() documents: "Clear present PTEs that map consecutive
pages of the same folio, collecting dirty/accessed bits."
And as can_pte_batch_count() will merge accessed/dirty bits, you would
silently mark PTEs dirty/accessed that belong to other folios, which
sounds very wrong.
Staring at the code, I wonder if there is also a problem with the write
bit; I'll have to dig into that.
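To make the concern concrete, here is a rough illustration (not the
actual kernel implementation) of what a batched clear does with the
per-PTE bits. If the batch crosses a folio boundary, accessed/dirty
state harvested from one folio's PTEs gets folded into the PTE that
move_ptes() re-establishes for a different folio's page:

/* Illustration only: batched clearing folds every PTE's
 * accessed/dirty state into the single returned PTE. */
static pte_t batched_clear_sketch(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
	unsigned int i;

	for (i = 1; i < nr; i++) {
		pte_t tmp = ptep_get_and_clear(mm, addr + i * PAGE_SIZE,
					       ptep + i);

		if (pte_dirty(tmp))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp))
			pte = pte_mkyoung(pte);
	}
	return pte;
}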
--
Cheers
David / dhildenb