Currently, folio_referenced_one() always checks the young flag for each PTE
sequentially, which is inefficient for large folios. This inefficiency is
especially noticeable when reclaiming clean file-backed large folios, where
folio_referenced() is observed as a significant performance hotspot.
Moreover, on Arm architecture, which supports contiguous PTEs, there is already
an optimization to clear the young flags for PTEs within a contiguous range.
However, this is not sufficient. We can extend this to perform batched operations
for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
of the young flags and flushing TLB entries, thereby improving performance
during large folio reclamation.
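For reference, the new interface (as added to include/linux/pgtable.h below) is:

	int clear_flush_young_ptes(struct vm_area_struct *vma, unsigned long addr,
				   pte_t *ptep, unsigned int nr);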
Performance testing:
Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
33% performance improvement on my Arm64 32-core server (and 10%+ improvement
on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
from approximately 35% to around 5%.
W/o patchset:
real 0m1.518s
user 0m0.000s
sys 0m1.518s
W/ patchset:
real 0m1.018s
user 0m0.000s
sys 0m1.018s
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
arch/arm64/include/asm/pgtable.h | 11 +++++++++++
include/linux/mmu_notifier.h | 9 +++++----
include/linux/pgtable.h | 19 +++++++++++++++++++
mm/rmap.c | 22 ++++++++++++++++++++--
4 files changed, 55 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e03034683156..a865bd8c46a3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
}
+#define clear_flush_young_ptes clear_flush_young_ptes
+static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+ if (likely(nr == 1))
+ return __ptep_clear_flush_young(vma, addr, ptep);
+
+ return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
+}
+
#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, unsigned int nr)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d1094c2d5fb6..be594b274729 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
range->owner = owner;
}
-#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
- __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
+ unsigned int ___nr = __nr; \
+ __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
___address, \
___address + \
- PAGE_SIZE); \
+ nr * PAGE_SIZE); \
__young; \
})
@@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define mmu_notifier_range_update_to_read_only(r) false
-#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_young_notify clear_flush_young_ptes
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b13b6f42be3c..c7d0fd228cb7 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
}
#endif
+#ifndef clear_flush_young_ptes
+static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+ int young = 0;
+
+ for (;;) {
+ young |= ptep_clear_flush_young(vma, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+
+ return young;
+}
+#endif
+
/*
* On some architectures hardware does not set page access bit when accessing
* memory page, it is responsibility of software setting this bit. It brings
diff --git a/mm/rmap.c b/mm/rmap.c
index d6799afe1114..ec232165c47d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
struct folio_referenced_arg *pra = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
int ptes = 0, referenced = 0;
+ unsigned int nr;
while (page_vma_mapped_walk(&pvmw)) {
address = pvmw.address;
+ nr = 1;
if (vma->vm_flags & VM_LOCKED) {
ptes++;
@@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
if (lru_gen_look_around(&pvmw))
referenced++;
} else if (pvmw.pte) {
+ if (folio_test_large(folio)) {
+ unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
+ unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
+ pte_t pteval = ptep_get(pvmw.pte);
+
+ nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
+ }
+
+ ptes += nr;
if (ptep_clear_flush_young_notify(vma, address,
- pvmw.pte))
+ pvmw.pte, nr))
referenced++;
+ /* Skip the batched PTEs */
+ pvmw.pte += nr - 1;
+ pvmw.address += (nr - 1) * PAGE_SIZE;
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_flush_young_notify(vma, address,
pvmw.pmd))
@@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
WARN_ON_ONCE(1);
}
- pra->mapcount--;
+ pra->mapcount -= nr;
+ if (ptes == pvmw.nr_pages) {
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
}
if (referenced)
--
2.47.3
On 11/12/2025 08:16, Baolin Wang wrote:
> Currently, folio_referenced_one() always checks the young flag for each PTE
> sequentially, which is inefficient for large folios. This inefficiency is
> especially noticeable when reclaiming clean file-backed large folios, where
> folio_referenced() is observed as a significant performance hotspot.
>
> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
> an optimization to clear the young flags for PTEs within a contiguous range.
> However, this is not sufficient. We can extend this to perform batched operations
> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>
> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
> of the young flags and flushing TLB entries, thereby improving performance
> during large folio reclamation.
>
> Performance testing:
> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
> from approximately 35% to around 5%.
>
> W/o patchset:
> real 0m1.518s
> user 0m0.000s
> sys 0m1.518s
>
> W/ patchset:
> real 0m1.018s
> user 0m0.000s
> sys 0m1.018s
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
> include/linux/mmu_notifier.h | 9 +++++----
> include/linux/pgtable.h | 19 +++++++++++++++++++
> mm/rmap.c | 22 ++++++++++++++++++++--
> 4 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index e03034683156..a865bd8c46a3 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
> }
>
> +#define clear_flush_young_ptes clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + if (likely(nr == 1))
> + return __ptep_clear_flush_young(vma, addr, ptep);
Bug: This is broken if core-mm tries to call this for nr=1 on a pte that is part
of a contpte mapping.
The similar fastpaths are here to prevent regressing the common small folio case.
I guess here the best approach is (note no leading underscores):
if (likely(nr == 1))
return ptep_clear_flush_young(vma, addr, ptep);
> +
> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
> +}
> +
> #define wrprotect_ptes wrprotect_ptes
> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep, unsigned int nr)
> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
> index d1094c2d5fb6..be594b274729 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
> range->owner = owner;
> }
>
> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
Shouldn't we rename this macro to clear_flush_young_ptes_notify()?
And potentially:
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
clear_flush_young_ptes_notify(__vma, __address, __ptep, 1)
if there are other non-batched users remaining.
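Roughly like so, I guess (just a sketch; note it also uses the local ___nr
copy for the notifier range end, whereas the version above references a
bare nr):

#define clear_flush_young_ptes_notify(__vma, __address, __ptep, __nr)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	unsigned int ___nr = __nr;					\
									\
	__young = clear_flush_young_ptes(___vma, ___address, __ptep,	\
					 ___nr);			\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
						  ___nr * PAGE_SIZE);	\
	__young;							\
})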
> ({ \
> int __young; \
> struct vm_area_struct *___vma = __vma; \
> unsigned long ___address = __address; \
> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
> + unsigned int ___nr = __nr; \
> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
> ___address, \
> ___address + \
> - PAGE_SIZE); \
> + nr * PAGE_SIZE); \
> __young; \
> })
>
> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>
> #define mmu_notifier_range_update_to_read_only(r) false
>
> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
> #define ptep_clear_young_notify ptep_test_and_clear_young
> #define pmdp_clear_young_notify pmdp_test_and_clear_young
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index b13b6f42be3c..c7d0fd228cb7 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
> }
> #endif
>
> +#ifndef clear_flush_young_ptes
Let's have some function documentation here please.
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + int young = 0;
> +
> + for (;;) {
I know Lorenzo is pretty allergic to this style of looping :)
He's right of course, we should probably just do this the idiomatic way and not
worry about it looking a bit different to the others.
> + young |= ptep_clear_flush_young(vma, addr, ptep);
> + if (--nr == 0)
> + break;
> + ptep++;
> + addr += PAGE_SIZE;
> + }
> +
> + return young;
> +}
> +#endif
> +
> /*
> * On some architectures hardware does not set page access bit when accessing
> * memory page, it is responsibility of software setting this bit. It brings
> diff --git a/mm/rmap.c b/mm/rmap.c
> index d6799afe1114..ec232165c47d 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
> struct folio_referenced_arg *pra = arg;
> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
> int ptes = 0, referenced = 0;
> + unsigned int nr;
>
> while (page_vma_mapped_walk(&pvmw)) {
> address = pvmw.address;
> + nr = 1;
>
> if (vma->vm_flags & VM_LOCKED) {
> ptes++;
> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
> if (lru_gen_look_around(&pvmw))
> referenced++;
> } else if (pvmw.pte) {
> + if (folio_test_large(folio)) {
> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
> + pte_t pteval = ptep_get(pvmw.pte);
> +
> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
> + }
> +
> + ptes += nr;
> if (ptep_clear_flush_young_notify(vma, address,
> - pvmw.pte))
> + pvmw.pte, nr))
> referenced++;
> + /* Skip the batched PTEs */
> + pvmw.pte += nr - 1;
> + pvmw.address += (nr - 1) * PAGE_SIZE;
The -1 part is because the walker will increment by 1 I'm guessing?
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> if (pmdp_clear_flush_young_notify(vma, address,
> pvmw.pmd))
> @@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
> WARN_ON_ONCE(1);
> }
>
> - pra->mapcount--;
> + pra->mapcount -= nr;
> + if (ptes == pvmw.nr_pages) {
> + page_vma_mapped_walk_done(&pvmw);
> + break;
What's this needed for? I'm suspicious because there wasn't an equivalent here
before.
Thanks,
Ryan
> + }
> }
>
> if (referenced)
On 2025/12/18 00:39, Ryan Roberts wrote:
> On 11/12/2025 08:16, Baolin Wang wrote:
>> Currently, folio_referenced_one() always checks the young flag for each PTE
>> sequentially, which is inefficient for large folios. This inefficiency is
>> especially noticeable when reclaiming clean file-backed large folios, where
>> folio_referenced() is observed as a significant performance hotspot.
>>
>> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
>> an optimization to clear the young flags for PTEs within a contiguous range.
>> However, this is not sufficient. We can extend this to perform batched operations
>> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>>
>> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
>> of the young flags and flushing TLB entries, thereby improving performance
>> during large folio reclamation.
>>
>> Performance testing:
>> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
>> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
>> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
>> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
>> from approximately 35% to around 5%.
>>
>> W/o patchset:
>> real 0m1.518s
>> user 0m0.000s
>> sys 0m1.518s
>>
>> W/ patchset:
>> real 0m1.018s
>> user 0m0.000s
>> sys 0m1.018s
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
>> include/linux/mmu_notifier.h | 9 +++++----
>> include/linux/pgtable.h | 19 +++++++++++++++++++
>> mm/rmap.c | 22 ++++++++++++++++++++--
>> 4 files changed, 55 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index e03034683156..a865bd8c46a3 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
>> }
>>
>> +#define clear_flush_young_ptes clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + if (likely(nr == 1))
>> + return __ptep_clear_flush_young(vma, addr, ptep);
>
> Bug: This is broken if core-mm tries to call this for nr=1 on a pte that is part
> of a contpte mapping.
>
> The similar fastpaths are here to prevent regressing the common small folio case.
Thanks for catching this. I had considered this before, but I still
missed it.
> I guess here the best approach is (note no leading underscores):
>
> if (likely(nr == 1))
> return ptep_clear_flush_young(vma, addr, ptep);
However, I prefer to use pte_cont() to check it. Later, I plan to clean
up the ptep_clear_flush_young().
if (nr == 1 && !pte_cont(__ptep_get(ptep)))
return __ptep_clear_flush_young(vma, addr, ptep);
>> +
>> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
>> +}
>> +
>> #define wrprotect_ptes wrprotect_ptes
>> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
>> unsigned long addr, pte_t *ptep, unsigned int nr)
>> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
>> index d1094c2d5fb6..be594b274729 100644
>> --- a/include/linux/mmu_notifier.h
>> +++ b/include/linux/mmu_notifier.h
>> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
>> range->owner = owner;
>> }
>>
>> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
>> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
>
> Shouldn't we rename this macro to clear_flush_young_ptes_notify()?
>
> And potentially:
>
> #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> clear_flush_young_ptes_notify(__vma, __address, __ptep, 1)
>
> if there are other non-batched users remaining.
There are no other non-batched users now, so it seems there is no need to
add another redundant API.
>> ({ \
>> int __young; \
>> struct vm_area_struct *___vma = __vma; \
>> unsigned long ___address = __address; \
>> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
>> + unsigned int ___nr = __nr; \
>> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
>> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
>> ___address, \
>> ___address + \
>> - PAGE_SIZE); \
>> + nr * PAGE_SIZE); \
>> __young; \
>> })
>>
>> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>>
>> #define mmu_notifier_range_update_to_read_only(r) false
>>
>> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
>> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
>> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
>> #define ptep_clear_young_notify ptep_test_and_clear_young
>> #define pmdp_clear_young_notify pmdp_test_and_clear_young
>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>> index b13b6f42be3c..c7d0fd228cb7 100644
>> --- a/include/linux/pgtable.h
>> +++ b/include/linux/pgtable.h
>> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
>> }
>> #endif
>>
>> +#ifndef clear_flush_young_ptes
>
> Let's have some function documentation here please.
Sure. Will do.
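Perhaps something along these lines (just a sketch; exact wording to be
decided):

/**
 * clear_flush_young_ptes - test and clear the young bit for a batch of
 *			    present PTEs, flushing any stale TLB entries.
 * @vma: the VMA covering the PTEs.
 * @addr: the virtual address corresponding to the first PTE.
 * @ptep: a pointer to the first PTE of the batch.
 * @nr: the number of consecutive PTEs to process.
 *
 * May be overridden by the architecture; the generic version simply loops
 * over ptep_clear_flush_young().
 *
 * Return: non-zero if any of the @nr PTEs were young.
 */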
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + int young = 0;
>> +
>> + for (;;) {
>
> I know Lorenzo is pretty allergic to this style of looping :)
>
> He's right of course, we should probably just do this the idiomatic way and not
> worry about it looking a bit different to the others.
Let me use the 'while (--nr) { }' instead.
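i.e. something like this (a sketch; using post-decrement rather than
pre-decrement so that all nr entries are processed):

static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 unsigned int nr)
{
	int young = 0;

	/* One ptep_clear_flush_young() per PTE in the batch. */
	while (nr--) {
		young |= ptep_clear_flush_young(vma, addr, ptep);
		ptep++;
		addr += PAGE_SIZE;
	}

	return young;
}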
>
>> + young |= ptep_clear_flush_young(vma, addr, ptep);
>> + if (--nr == 0)
>> + break;
>> + ptep++;
>> + addr += PAGE_SIZE;
>> + }
>> +
>> + return young;
>> +}
>> +#endif
>> +
>> /*
>> * On some architectures hardware does not set page access bit when accessing
>> * memory page, it is responsibility of software setting this bit. It brings
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index d6799afe1114..ec232165c47d 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
>> struct folio_referenced_arg *pra = arg;
>> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
>> int ptes = 0, referenced = 0;
>> + unsigned int nr;
>>
>> while (page_vma_mapped_walk(&pvmw)) {
>> address = pvmw.address;
>> + nr = 1;
>>
>> if (vma->vm_flags & VM_LOCKED) {
>> ptes++;
>> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
>> if (lru_gen_look_around(&pvmw))
>> referenced++;
>> } else if (pvmw.pte) {
>> + if (folio_test_large(folio)) {
>> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
>> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
>> + pte_t pteval = ptep_get(pvmw.pte);
>> +
>> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
>> + }
>> +
>> + ptes += nr;
>> if (ptep_clear_flush_young_notify(vma, address,
>> - pvmw.pte))
>> + pvmw.pte, nr))
>> referenced++;
>> + /* Skip the batched PTEs */
>> + pvmw.pte += nr - 1;
>> + pvmw.address += (nr - 1) * PAGE_SIZE;
>
> The -1 part is because the walker will increment by 1 I'm guessing?
Right.
>
>> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
>> if (pmdp_clear_flush_young_notify(vma, address,
>> pvmw.pmd))
>> @@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
>> WARN_ON_ONCE(1);
>> }
>>
>> - pra->mapcount--;
>> + pra->mapcount -= nr;
>> + if (ptes == pvmw.nr_pages) {
>> + page_vma_mapped_walk_done(&pvmw);
>> + break;
>
> What's this needed for? I'm suspicious because there wasn't an equivalent here
> before.
If we are sure that we batched the entire folio, we can just optimize
and stop right here.
Thanks for reviewing.
>>> +#define clear_flush_young_ptes clear_flush_young_ptes
>>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>>> + unsigned long addr, pte_t *ptep,
>>> + unsigned int nr)
>>> +{
>>> + if (likely(nr == 1))
>>> + return __ptep_clear_flush_young(vma, addr, ptep);
>>
>> Bug: This is broken if core-mm tries to call this for nr=1 on a pte that is part
>> of a contpte mapping.
>>
>> The similar fastpaths are here to prevent regressing the common small folio case.
>
> Thanks for catching this. I had considered this before, but I still missed it.
>
>> I guess here the best approach is (note no leading underscores):
>>
>> if (likely(nr == 1))
>> return ptep_clear_flush_young(vma, addr, ptep);
>
> However, I prefer to use pte_cont() to check it. Later, I plan to clean up the
> ptep_clear_flush_young().
>
> if (nr == 1 && !pte_cont(__ptep_get(ptep)))
> return __ptep_clear_flush_young(vma, addr, ptep);
Sure. That would follow the pattern in clear_young_dirty_ptes(). Please use the
likely() hint as is done everywhere else:
if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
I notice that ptep_test_and_clear_young() and ptep_clear_flush_young() are both
testing against pte_valid_cont(). These could probably be relaxed to pte_cont(),
since it is implicit that the pte must be valid?
On 2025/12/18 20:08, Ryan Roberts wrote:
>>>> +#define clear_flush_young_ptes clear_flush_young_ptes
>>>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>>>> + unsigned long addr, pte_t *ptep,
>>>> + unsigned int nr)
>>>> +{
>>>> + if (likely(nr == 1))
>>>> + return __ptep_clear_flush_young(vma, addr, ptep);
>>>
>>> Bug: This is broken if core-mm tries to call this for nr=1 on a pte that is part
>>> of a contpte mapping.
>>>
>>> The similar fastpaths are here to prevent regressing the common small folio case.
>>
>> Thanks for catching this. I had considered this before, but I still missed it.
>>
>>> I guess here the best approach is (note no leading underscores):
>>>
>>> if (likely(nr == 1))
>>> return ptep_clear_flush_young(vma, addr, ptep);
>>
>> However, I prefer to use pte_cont() to check it. Later, I plan to clean up the
>> ptep_clear_flush_young().
>>
>> if (nr == 1 && !pte_cont(__ptep_get(ptep)))
>> return __ptep_clear_flush_young(vma, addr, ptep);
>
> Sure. That would follow the pattern in clear_young_dirty_ptes(). Please use the
> likely() hint as is done everywhere else:
>
> if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
Sure.
> I notice that ptep_test_and_clear_young() and ptep_clear_flush_young() are both
> testing against pte_valid_cont(). These could probably be relaxed to pte_cont(),
> since it is implicit that the pte must be valid?
Yes, I think so. I can do a cleanup later in a separate patch.
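Putting the two points together, the arm64 helper would presumably end up
looking something like this (a sketch based on the discussion above, not
the final posted code):

#define clear_flush_young_ptes clear_flush_young_ptes
static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 unsigned int nr)
{
	/*
	 * Fast path: a single, non-contpte entry (the common small folio
	 * case) can skip the contpte handling entirely.
	 */
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
}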
On 11/12/25 1:46 pm, Baolin Wang wrote:
> Currently, folio_referenced_one() always checks the young flag for each PTE
> sequentially, which is inefficient for large folios. This inefficiency is
> especially noticeable when reclaiming clean file-backed large folios, where
> folio_referenced() is observed as a significant performance hotspot.
>
> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
> an optimization to clear the young flags for PTEs within a contiguous range.
> However, this is not sufficient. We can extend this to perform batched operations
> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>
> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
> of the young flags and flushing TLB entries, thereby improving performance
> during large folio reclamation.
>
> Performance testing:
> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
> from approximately 35% to around 5%.
>
> W/o patchset:
> real 0m1.518s
> user 0m0.000s
> sys 0m1.518s
>
> W/ patchset:
> real 0m1.018s
> user 0m0.000s
> sys 0m1.018s
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
> include/linux/mmu_notifier.h | 9 +++++----
> include/linux/pgtable.h | 19 +++++++++++++++++++
> mm/rmap.c | 22 ++++++++++++++++++++--
> 4 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index e03034683156..a865bd8c46a3 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
> }
>
> +#define clear_flush_young_ptes clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + if (likely(nr == 1))
> + return __ptep_clear_flush_young(vma, addr, ptep);
> +
> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
> +}
> +
> #define wrprotect_ptes wrprotect_ptes
> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep, unsigned int nr)
> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
> index d1094c2d5fb6..be594b274729 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
> range->owner = owner;
> }
>
> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
> ({ \
> int __young; \
> struct vm_area_struct *___vma = __vma; \
> unsigned long ___address = __address; \
> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
> + unsigned int ___nr = __nr; \
> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
> ___address, \
> ___address + \
> - PAGE_SIZE); \
> + nr * PAGE_SIZE); \
> __young; \
> })
>
> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>
> #define mmu_notifier_range_update_to_read_only(r) false
>
> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
> #define ptep_clear_young_notify ptep_test_and_clear_young
> #define pmdp_clear_young_notify pmdp_test_and_clear_young
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index b13b6f42be3c..c7d0fd228cb7 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
> }
> #endif
>
> +#ifndef clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + int young = 0;
> +
> + for (;;) {
> + young |= ptep_clear_flush_young(vma, addr, ptep);
> + if (--nr == 0)
> + break;
> + ptep++;
> + addr += PAGE_SIZE;
> + }
> +
> + return young;
> +}
> +#endif
> +
> /*
> * On some architectures hardware does not set page access bit when accessing
> * memory page, it is responsibility of software setting this bit. It brings
> diff --git a/mm/rmap.c b/mm/rmap.c
> index d6799afe1114..ec232165c47d 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
> struct folio_referenced_arg *pra = arg;
> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
> int ptes = 0, referenced = 0;
> + unsigned int nr;
>
> while (page_vma_mapped_walk(&pvmw)) {
> address = pvmw.address;
> + nr = 1;
>
> if (vma->vm_flags & VM_LOCKED) {
> ptes++;
> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
> if (lru_gen_look_around(&pvmw))
> referenced++;
> } else if (pvmw.pte) {
> + if (folio_test_large(folio)) {
> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
I may be hallucinating here but I am just trying to recall things - is this a bug in
folio_pte_batch_flags()? A folio may not be naturally aligned in virtual space and hence
we may cross the PTE table while batching across it, which can be fixed by taking into
account pmd_addr_end() while computing max_nr.
> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
> + pte_t pteval = ptep_get(pvmw.pte);
> +
> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
> + }
> +
> + ptes += nr;
> if (ptep_clear_flush_young_notify(vma, address,
> - pvmw.pte))
> + pvmw.pte, nr))
> referenced++;
> + /* Skip the batched PTEs */
> + pvmw.pte += nr - 1;
> + pvmw.address += (nr - 1) * PAGE_SIZE;
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> if (pmdp_clear_flush_young_notify(vma, address,
> pvmw.pmd))
> @@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
> WARN_ON_ONCE(1);
> }
>
> - pra->mapcount--;
> + pra->mapcount -= nr;
> + if (ptes == pvmw.nr_pages) {
> + page_vma_mapped_walk_done(&pvmw);
> + break;
> + }
> }
>
> if (referenced)
On 2025/12/17 14:49, Dev Jain wrote:
>
> On 11/12/25 1:46 pm, Baolin Wang wrote:
>> Currently, folio_referenced_one() always checks the young flag for each PTE
>> sequentially, which is inefficient for large folios. This inefficiency is
>> especially noticeable when reclaiming clean file-backed large folios, where
>> folio_referenced() is observed as a significant performance hotspot.
>>
>> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
>> an optimization to clear the young flags for PTEs within a contiguous range.
>> However, this is not sufficient. We can extend this to perform batched operations
>> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>>
>> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
>> of the young flags and flushing TLB entries, thereby improving performance
>> during large folio reclamation.
>>
>> Performance testing:
>> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
>> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
>> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
>> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
>> from approximately 35% to around 5%.
>>
>> W/o patchset:
>> real 0m1.518s
>> user 0m0.000s
>> sys 0m1.518s
>>
>> W/ patchset:
>> real 0m1.018s
>> user 0m0.000s
>> sys 0m1.018s
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
>> include/linux/mmu_notifier.h | 9 +++++----
>> include/linux/pgtable.h | 19 +++++++++++++++++++
>> mm/rmap.c | 22 ++++++++++++++++++++--
>> 4 files changed, 55 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index e03034683156..a865bd8c46a3 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
>> }
>>
>> +#define clear_flush_young_ptes clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + if (likely(nr == 1))
>> + return __ptep_clear_flush_young(vma, addr, ptep);
>> +
>> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
>> +}
>> +
>> #define wrprotect_ptes wrprotect_ptes
>> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
>> unsigned long addr, pte_t *ptep, unsigned int nr)
>> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
>> index d1094c2d5fb6..be594b274729 100644
>> --- a/include/linux/mmu_notifier.h
>> +++ b/include/linux/mmu_notifier.h
>> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
>> range->owner = owner;
>> }
>>
>> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
>> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
>> ({ \
>> int __young; \
>> struct vm_area_struct *___vma = __vma; \
>> unsigned long ___address = __address; \
>> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
>> + unsigned int ___nr = __nr; \
>> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
>> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
>> ___address, \
>> ___address + \
>> - PAGE_SIZE); \
>> + nr * PAGE_SIZE); \
>> __young; \
>> })
>>
>> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>>
>> #define mmu_notifier_range_update_to_read_only(r) false
>>
>> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
>> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
>> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
>> #define ptep_clear_young_notify ptep_test_and_clear_young
>> #define pmdp_clear_young_notify pmdp_test_and_clear_young
>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>> index b13b6f42be3c..c7d0fd228cb7 100644
>> --- a/include/linux/pgtable.h
>> +++ b/include/linux/pgtable.h
>> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
>> }
>> #endif
>>
>> +#ifndef clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + int young = 0;
>> +
>> + for (;;) {
>> + young |= ptep_clear_flush_young(vma, addr, ptep);
>> + if (--nr == 0)
>> + break;
>> + ptep++;
>> + addr += PAGE_SIZE;
>> + }
>> +
>> + return young;
>> +}
>> +#endif
>> +
>> /*
>> * On some architectures hardware does not set page access bit when accessing
>> * memory page, it is responsibility of software setting this bit. It brings
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index d6799afe1114..ec232165c47d 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
>> struct folio_referenced_arg *pra = arg;
>> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
>> int ptes = 0, referenced = 0;
>> + unsigned int nr;
>>
>> while (page_vma_mapped_walk(&pvmw)) {
>> address = pvmw.address;
>> + nr = 1;
>>
>> if (vma->vm_flags & VM_LOCKED) {
>> ptes++;
>> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
>> if (lru_gen_look_around(&pvmw))
>> referenced++;
>> } else if (pvmw.pte) {
>> + if (folio_test_large(folio)) {
>> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
>
> I may be hallucinating here but I am just trying to recall things - is this a bug in
> folio_pte_batch_flags()? A folio may not be naturally aligned in virtual space and hence
> we may cross the PTE table while batching across it, which can be fixed by taking into
> account pmd_addr_end() while computing max_nr.
IMHO, the comments for the folio_pte_batch_flags() function have already
made clear requirements for the caller to avoid such situations:
"
* @ptep must map any page of the folio. max_nr must be at least one and
* must be limited by the caller so scanning cannot exceed a single VMA and
* a single page table.
"
Additionally, Lance recently fixed a similar issue; see commit
ddd05742b45b ("mm/rmap: fix potential out-of-bounds page table access
during batched unmap").
On 17/12/25 12:39 pm, Baolin Wang wrote:
>
>
> On 2025/12/17 14:49, Dev Jain wrote:
>>
>> On 11/12/25 1:46 pm, Baolin Wang wrote:
>>> Currently, folio_referenced_one() always checks the young flag for each PTE
>>> sequentially, which is inefficient for large folios. This inefficiency is
>>> especially noticeable when reclaiming clean file-backed large folios, where
>>> folio_referenced() is observed as a significant performance hotspot.
>>>
>>> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
>>> an optimization to clear the young flags for PTEs within a contiguous range.
>>> However, this is not sufficient. We can extend this to perform batched operations
>>> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>>>
>>> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
>>> of the young flags and flushing TLB entries, thereby improving performance
>>> during large folio reclamation.
>>>
>>> Performance testing:
>>> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
>>> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
>>> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
>>> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
>>> from approximately 35% to around 5%.
>>>
>>> W/o patchset:
>>> real 0m1.518s
>>> user 0m0.000s
>>> sys 0m1.518s
>>>
>>> W/ patchset:
>>> real 0m1.018s
>>> user 0m0.000s
>>> sys 0m1.018s
>>>
>>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>>> ---
>>> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
>>> include/linux/mmu_notifier.h | 9 +++++----
>>> include/linux/pgtable.h | 19 +++++++++++++++++++
>>> mm/rmap.c | 22 ++++++++++++++++++++--
>>> 4 files changed, 55 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>>> index e03034683156..a865bd8c46a3 100644
>>> --- a/arch/arm64/include/asm/pgtable.h
>>> +++ b/arch/arm64/include/asm/pgtable.h
>>> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>>> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
>>> }
>>> +#define clear_flush_young_ptes clear_flush_young_ptes
>>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>>> + unsigned long addr, pte_t *ptep,
>>> + unsigned int nr)
>>> +{
>>> + if (likely(nr == 1))
>>> + return __ptep_clear_flush_young(vma, addr, ptep);
>>> +
>>> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
>>> +}
>>> +
>>> #define wrprotect_ptes wrprotect_ptes
>>> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
>>> unsigned long addr, pte_t *ptep, unsigned int nr)
>>> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
>>> index d1094c2d5fb6..be594b274729 100644
>>> --- a/include/linux/mmu_notifier.h
>>> +++ b/include/linux/mmu_notifier.h
>>> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
>>> range->owner = owner;
>>> }
>>> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
>>> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
>>> ({ \
>>> int __young; \
>>> struct vm_area_struct *___vma = __vma; \
>>> unsigned long ___address = __address; \
>>> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
>>> + unsigned int ___nr = __nr; \
>>> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
>>> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
>>> ___address, \
>>> ___address + \
>>> - PAGE_SIZE); \
>>> + nr * PAGE_SIZE); \
>>> __young; \
>>> })
>>> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>>> #define mmu_notifier_range_update_to_read_only(r) false
>>> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
>>> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
>>> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
>>> #define ptep_clear_young_notify ptep_test_and_clear_young
>>> #define pmdp_clear_young_notify pmdp_test_and_clear_young
>>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>>> index b13b6f42be3c..c7d0fd228cb7 100644
>>> --- a/include/linux/pgtable.h
>>> +++ b/include/linux/pgtable.h
>>> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
>>> }
>>> #endif
>>> +#ifndef clear_flush_young_ptes
>>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>>> + unsigned long addr, pte_t *ptep,
>>> + unsigned int nr)
>>> +{
>>> + int young = 0;
>>> +
>>> + for (;;) {
>>> + young |= ptep_clear_flush_young(vma, addr, ptep);
>>> + if (--nr == 0)
>>> + break;
>>> + ptep++;
>>> + addr += PAGE_SIZE;
>>> + }
>>> +
>>> + return young;
>>> +}
>>> +#endif
>>> +
>>> /*
>>> * On some architectures hardware does not set page access bit when accessing
>>> * memory page, it is responsibility of software setting this bit. It brings
>>> diff --git a/mm/rmap.c b/mm/rmap.c
>>> index d6799afe1114..ec232165c47d 100644
>>> --- a/mm/rmap.c
>>> +++ b/mm/rmap.c
>>> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
>>> struct folio_referenced_arg *pra = arg;
>>> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
>>> int ptes = 0, referenced = 0;
>>> + unsigned int nr;
>>> while (page_vma_mapped_walk(&pvmw)) {
>>> address = pvmw.address;
>>> + nr = 1;
>>> if (vma->vm_flags & VM_LOCKED) {
>>> ptes++;
>>> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
>>> if (lru_gen_look_around(&pvmw))
>>> referenced++;
>>> } else if (pvmw.pte) {
>>> + if (folio_test_large(folio)) {
>>> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
>>
>> I may be hallucinating here but I am just trying to recall things - is this a bug in
>> folio_pte_batch_flags()? A folio may not be naturally aligned in virtual space and hence
>> we may cross the PTE table while batching across it, which can be fixed by taking into
>> account pmd_addr_end() while computing max_nr.
>
> IMHO, the comments for the folio_pte_batch_flags() function have already made
> clear requirements for the caller to avoid such situations:
>
> "
> * @ptep must map any page of the folio. max_nr must be at least one and
> * must be limited by the caller so scanning cannot exceed a single VMA and
> * a single page table.
> "
>
> Additionally, Lance recently fixed a similar issue, see commit ddd05742b45b
> ("mm/rmap: fix potential out-of-bounds page table access during batched unmap").
Ah I see, all other users of the folio_pte_batch API constrain start and end
because they are already operating on a single PTE table. But for rmap code
this may not be the case.
On 11/12/25 1:46 pm, Baolin Wang wrote:
> Currently, folio_referenced_one() always checks the young flag for each PTE
> sequentially, which is inefficient for large folios. This inefficiency is
> especially noticeable when reclaiming clean file-backed large folios, where
> folio_referenced() is observed as a significant performance hotspot.
>
> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
> an optimization to clear the young flags for PTEs within a contiguous range.
> However, this is not sufficient. We can extend this to perform batched operations
> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>
> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
> of the young flags and flushing TLB entries, thereby improving performance
> during large folio reclamation.
>
> Performance testing:
> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
> from approximately 35% to around 5%.
>
> W/o patchset:
> real 0m1.518s
> user 0m0.000s
> sys 0m1.518s
>
> W/ patchset:
> real 0m1.018s
> user 0m0.000s
> sys 0m1.018s
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
> include/linux/mmu_notifier.h | 9 +++++----
> include/linux/pgtable.h | 19 +++++++++++++++++++
> mm/rmap.c | 22 ++++++++++++++++++++--
> 4 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index e03034683156..a865bd8c46a3 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
> }
>
> +#define clear_flush_young_ptes clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + if (likely(nr == 1))
> + return __ptep_clear_flush_young(vma, addr, ptep);
> +
> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
> +}
> +
> #define wrprotect_ptes wrprotect_ptes
> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep, unsigned int nr)
> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
> index d1094c2d5fb6..be594b274729 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
> range->owner = owner;
> }
>
> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
> ({ \
> int __young; \
> struct vm_area_struct *___vma = __vma; \
> unsigned long ___address = __address; \
> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
> + unsigned int ___nr = __nr; \
> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
> ___address, \
> ___address + \
> - PAGE_SIZE); \
> + nr * PAGE_SIZE); \
> __young; \
> })
Do we have an existing bug here, in that mmu_notifier_clear_flush_young() should
have been called for CONT_PTES length if the folio was contpte mapped?
>
> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>
> #define mmu_notifier_range_update_to_read_only(r) false
>
> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
> #define ptep_clear_young_notify ptep_test_and_clear_young
> #define pmdp_clear_young_notify pmdp_test_and_clear_young
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index b13b6f42be3c..c7d0fd228cb7 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
> }
> #endif
>
> +#ifndef clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + int young = 0;
> +
> + for (;;) {
> + young |= ptep_clear_flush_young(vma, addr, ptep);
> + if (--nr == 0)
> + break;
> + ptep++;
> + addr += PAGE_SIZE;
> + }
> +
> + return young;
> +}
> +#endif
> +
> /*
> * On some architectures hardware does not set page access bit when accessing
> * memory page, it is responsibility of software setting this bit. It brings
> diff --git a/mm/rmap.c b/mm/rmap.c
> index d6799afe1114..ec232165c47d 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
> struct folio_referenced_arg *pra = arg;
> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
> int ptes = 0, referenced = 0;
> + unsigned int nr;
>
> while (page_vma_mapped_walk(&pvmw)) {
> address = pvmw.address;
> + nr = 1;
>
> if (vma->vm_flags & VM_LOCKED) {
> ptes++;
> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
> if (lru_gen_look_around(&pvmw))
> referenced++;
> } else if (pvmw.pte) {
> + if (folio_test_large(folio)) {
> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
> + pte_t pteval = ptep_get(pvmw.pte);
> +
> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
> + }
> +
> + ptes += nr;
> if (ptep_clear_flush_young_notify(vma, address,
> - pvmw.pte))
> + pvmw.pte, nr))
> referenced++;
> + /* Skip the batched PTEs */
> + pvmw.pte += nr - 1;
> + pvmw.address += (nr - 1) * PAGE_SIZE;
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> if (pmdp_clear_flush_young_notify(vma, address,
> pvmw.pmd))
> @@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
> WARN_ON_ONCE(1);
> }
>
> - pra->mapcount--;
> + pra->mapcount -= nr;
> + if (ptes == pvmw.nr_pages) {
> + page_vma_mapped_walk_done(&pvmw);
> + break;
> + }
> }
>
> if (referenced)
On 2025/12/17 14:23, Dev Jain wrote:
>
> On 11/12/25 1:46 pm, Baolin Wang wrote:
>> Currently, folio_referenced_one() always checks the young flag for each PTE
>> sequentially, which is inefficient for large folios. This inefficiency is
>> especially noticeable when reclaiming clean file-backed large folios, where
>> folio_referenced() is observed as a significant performance hotspot.
>>
>> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
>> an optimization to clear the young flags for PTEs within a contiguous range.
>> However, this is not sufficient. We can extend this to perform batched operations
>> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>>
>> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
>> of the young flags and flushing TLB entries, thereby improving performance
>> during large folio reclamation.
>>
>> Performance testing:
>> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
>> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
>> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
>> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
>> from approximately 35% to around 5%.
>>
>> W/o patchset:
>> real 0m1.518s
>> user 0m0.000s
>> sys 0m1.518s
>>
>> W/ patchset:
>> real 0m1.018s
>> user 0m0.000s
>> sys 0m1.018s
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
>> include/linux/mmu_notifier.h | 9 +++++----
>> include/linux/pgtable.h | 19 +++++++++++++++++++
>> mm/rmap.c | 22 ++++++++++++++++++++--
>> 4 files changed, 55 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index e03034683156..a865bd8c46a3 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
>> }
>>
>> +#define clear_flush_young_ptes clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + if (likely(nr == 1))
>> + return __ptep_clear_flush_young(vma, addr, ptep);
>> +
>> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
>> +}
>> +
>> #define wrprotect_ptes wrprotect_ptes
>> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
>> unsigned long addr, pte_t *ptep, unsigned int nr)
>> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
>> index d1094c2d5fb6..be594b274729 100644
>> --- a/include/linux/mmu_notifier.h
>> +++ b/include/linux/mmu_notifier.h
>> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
>> range->owner = owner;
>> }
>>
>> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
>> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
>> ({ \
>> int __young; \
>> struct vm_area_struct *___vma = __vma; \
>> unsigned long ___address = __address; \
>> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
>> + unsigned int ___nr = __nr; \
>> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
>> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
>> ___address, \
>> ___address + \
>> - PAGE_SIZE); \
>> + nr * PAGE_SIZE); \
>> __young; \
>> })
>
> Do we have an existing bug here, in that mmu_notifier_clear_flush_young() should
> have been called for CONT_PTES length if the folio was contpte mapped?
I can't call it a bug, because folio_referenced_one() does iterate
through each PTE of the large folio, but it is indeed inefficient.
On Thu, Dec 11, 2025 at 04:16:55PM +0800, Baolin Wang wrote:
> Currently, folio_referenced_one() always checks the young flag for each PTE
> sequentially, which is inefficient for large folios. This inefficiency is
> especially noticeable when reclaiming clean file-backed large folios, where
> folio_referenced() is observed as a significant performance hotspot.
>
> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
arm64 you mean :)
> an optimization to clear the young flags for PTEs within a contiguous range.
> However, this is not sufficient. We can extend this to perform batched operations
> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>
> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
> of the young flags and flushing TLB entries, thereby improving performance
> during large folio reclamation.
>
> Performance testing:
> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
> from approximately 35% to around 5%.
>
> W/o patchset:
> real 0m1.518s
> user 0m0.000s
> sys 0m1.518s
>
> W/ patchset:
> real 0m1.018s
> user 0m0.000s
> sys 0m1.018s
That's nice!
Have you performed the same kind of performance testing on non-arm64? As in the
past we've had a batch optimisation go horribly wrong on non-arm64 even if it
was ok on arm64 :)
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
> include/linux/mmu_notifier.h | 9 +++++----
> include/linux/pgtable.h | 19 +++++++++++++++++++
> mm/rmap.c | 22 ++++++++++++++++++++--
> 4 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index e03034683156..a865bd8c46a3 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
> }
>
> +#define clear_flush_young_ptes clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + if (likely(nr == 1))
> + return __ptep_clear_flush_young(vma, addr, ptep);
> +
> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
> +}
Hmm again this is a weird way of exposing a contpte-specific function; you
really need to rework that as discussed in patch 1/3.
It seems to me we can share code to avoid this.
> +
> #define wrprotect_ptes wrprotect_ptes
> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep, unsigned int nr)
> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
> index d1094c2d5fb6..be594b274729 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
> range->owner = owner;
> }
>
> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
> ({ \
> int __young; \
> struct vm_area_struct *___vma = __vma; \
> unsigned long ___address = __address; \
> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
> + unsigned int ___nr = __nr; \
> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
> ___address, \
> ___address + \
> - PAGE_SIZE); \
> + ___nr * PAGE_SIZE); \
> __young; \
> })
An aside, but I wonder why this needs to be a (pretty disgusting) macro?
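Something like the below seems like it ought to work (untested, and I
vaguely recall these became macros partly to dodge header ordering issues,
so treat this as a sketch):

	static inline int ptep_clear_flush_young_notify(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int nr)
	{
		int young = clear_flush_young_ptes(vma, address, ptep, nr);

		young |= mmu_notifier_clear_flush_young(vma->vm_mm, address,
							address + nr * PAGE_SIZE);
		return young;
	}

This would also sidestep the ___nr-style macro hygiene games entirely.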
>
> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>
> #define mmu_notifier_range_update_to_read_only(r) false
>
> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
> #define ptep_clear_young_notify ptep_test_and_clear_young
> #define pmdp_clear_young_notify pmdp_test_and_clear_young
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index b13b6f42be3c..c7d0fd228cb7 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
> }
> #endif
>
> +#ifndef clear_flush_young_ptes
> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *ptep,
> + unsigned int nr)
> +{
> + int young = 0;
> +
> + for (;;) {
> + young |= ptep_clear_flush_young(vma, addr, ptep);
> + if (--nr == 0)
> + break;
> + ptep++;
> + addr += PAGE_SIZE;
> + }
> +
> + return young;
> +}
> +#endif
> +
> /*
> * On some architectures hardware does not set page access bit when accessing
> * memory page, it is responsibility of software setting this bit. It brings
> diff --git a/mm/rmap.c b/mm/rmap.c
> index d6799afe1114..ec232165c47d 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
> struct folio_referenced_arg *pra = arg;
> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
> int ptes = 0, referenced = 0;
> + unsigned int nr;
>
> while (page_vma_mapped_walk(&pvmw)) {
> address = pvmw.address;
> + nr = 1;
>
> if (vma->vm_flags & VM_LOCKED) {
> ptes++;
> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
> if (lru_gen_look_around(&pvmw))
> referenced++;
> } else if (pvmw.pte) {
> + if (folio_test_large(folio)) {
> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
> + pte_t pteval = ptep_get(pvmw.pte);
> +
> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
I do wish we could put this fiddly logic into a helper for each place in
which we do this kind of 'end of the PTE table, maximum number of PTEs we
could have' calculation.
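Something like this, say (the name is invented on the spot, so very much
a sketch):

	/* Max number of PTEs from addr to the end of the PTE table or VMA. */
	static inline unsigned int max_pte_batch_nr(struct vm_area_struct *vma,
			unsigned long addr)
	{
		return (pmd_addr_end(addr, vma->vm_end) - addr) >> PAGE_SHIFT;
	}

which would reduce the callsite here to:

	nr = folio_pte_batch(folio, pvmw.pte, ptep_get(pvmw.pte),
			     max_pte_batch_nr(vma, address));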
> + }
NIT but we're running into pretty long lines here.
> +
> + ptes += nr;
> if (ptep_clear_flush_young_notify(vma, address,
> - pvmw.pte))
> + pvmw.pte, nr))
> referenced++;
I find this referenced logic weird; it seems like it should be a boolean,
but that is outside the scope of your patch here :)
> + /* Skip the batched PTEs */
> + pvmw.pte += nr - 1;
> + pvmw.address += (nr - 1) * PAGE_SIZE;
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> if (pmdp_clear_flush_young_notify(vma, address,
> pvmw.pmd))
> @@ -886,7 +900,11 @@ static bool folio_referenced_one(struct folio *folio,
> WARN_ON_ONCE(1);
> }
>
> - pra->mapcount--;
> + pra->mapcount -= nr;
> + if (ptes == pvmw.nr_pages) {
> + page_vma_mapped_walk_done(&pvmw);
> + break;
> + }
> }
>
> if (referenced)
> --
> 2.47.3
>
On 2025/12/15 20:22, Lorenzo Stoakes wrote:
> On Thu, Dec 11, 2025 at 04:16:55PM +0800, Baolin Wang wrote:
>> Currently, folio_referenced_one() always checks the young flag for each PTE
>> sequentially, which is inefficient for large folios. This inefficiency is
>> especially noticeable when reclaiming clean file-backed large folios, where
>> folio_referenced() is observed as a significant performance hotspot.
>>
>> Moreover, on Arm architecture, which supports contiguous PTEs, there is already
>
> arm64 you mean :)
Right. Will make it clear.
>> an optimization to clear the young flags for PTEs within a contiguous range.
>> However, this is not sufficient. We can extend this to perform batched operations
>> for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE).
>>
>> Introduce a new API: clear_flush_young_ptes() to facilitate batched checking
>> of the young flags and flushing TLB entries, thereby improving performance
>> during large folio reclamation.
>>
>> Performance testing:
>> Allocate 10G clean file-backed folios by mmap() in a memory cgroup, and try to
>> reclaim 8G file-backed folios via the memory.reclaim interface. I can observe
>> 33% performance improvement on my Arm64 32-core server (and 10%+ improvement
>> on my X86 machine). Meanwhile, the hotspot folio_check_references() dropped
>> from approximately 35% to around 5%.
>>
>> W/o patchset:
>> real 0m1.518s
>> user 0m0.000s
>> sys 0m1.518s
>>
>> W/ patchset:
>> real 0m1.018s
>> user 0m0.000s
>> sys 0m1.018s
>
> That's nice!
>
> Have you performed the same kind of performance testing on non-arm64? In the
> past we've had batch optimisations go horribly wrong on non-arm64 even when
> they were fine on arm64 :)
Yes, it seems you missed my test results for the x86 machine in the commit
message :)
"I can observe 33% performance improvement on my Arm64 32-core server
(and 10%+ improvement on my X86 machine)."
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> arch/arm64/include/asm/pgtable.h | 11 +++++++++++
>> include/linux/mmu_notifier.h | 9 +++++----
>> include/linux/pgtable.h | 19 +++++++++++++++++++
>> mm/rmap.c | 22 ++++++++++++++++++++--
>> 4 files changed, 55 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index e03034683156..a865bd8c46a3 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -1869,6 +1869,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>> return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
>> }
>>
>> +#define clear_flush_young_ptes clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + if (likely(nr == 1))
>> + return __ptep_clear_flush_young(vma, addr, ptep);
>> +
>> + return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
>> +}
>
> Hmm, again this is a weird way of exposing a contpte-specific function; you
> really need to rework that as discussed in patch 1/3.
>
> It seems to me we can share code to avoid this.
Sorry, I don't think so. This is the current convention for exposing
contpte-aware helpers on arm64. Please take a look at set_ptes(),
clear_full_ptes(), wrprotect_ptes() and so on (in this file).
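For example, set_ptes() in this same file follows the pattern (quoting
roughly from memory, so the details may be slightly off):

	#define set_ptes set_ptes
	static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		pte = pte_mknoncont(pte);

		if (likely(nr == 1)) {
			contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
			__set_ptes(mm, addr, ptep, pte, 1);
		} else {
			contpte_set_ptes(mm, addr, ptep, pte, nr);
		}
	}

i.e. define the batched helper, fast-path nr == 1, and fall back to the
contpte_*() implementation for larger batches, which is exactly the shape
of clear_flush_young_ptes() here.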
>> #define wrprotect_ptes wrprotect_ptes
>> static __always_inline void wrprotect_ptes(struct mm_struct *mm,
>> unsigned long addr, pte_t *ptep, unsigned int nr)
>> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
>> index d1094c2d5fb6..be594b274729 100644
>> --- a/include/linux/mmu_notifier.h
>> +++ b/include/linux/mmu_notifier.h
>> @@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
>> range->owner = owner;
>> }
>>
>> -#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
>> +#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr) \
>> ({ \
>> int __young; \
>> struct vm_area_struct *___vma = __vma; \
>> unsigned long ___address = __address; \
>> - __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
>> + unsigned int ___nr = __nr; \
>> + __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
>> __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
>> ___address, \
>> ___address + \
>> - PAGE_SIZE); \
>> + ___nr * PAGE_SIZE); \
>> __young; \
>> })
>
> An aside, but I wonder why this needs to be a (pretty disgusting) macro?
Um, I can send a follow-up to clean up all these related macros.
>> @@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
>>
>> #define mmu_notifier_range_update_to_read_only(r) false
>>
>> -#define ptep_clear_flush_young_notify ptep_clear_flush_young
>> +#define ptep_clear_flush_young_notify clear_flush_young_ptes
>> #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
>> #define ptep_clear_young_notify ptep_test_and_clear_young
>> #define pmdp_clear_young_notify pmdp_test_and_clear_young
>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>> index b13b6f42be3c..c7d0fd228cb7 100644
>> --- a/include/linux/pgtable.h
>> +++ b/include/linux/pgtable.h
>> @@ -947,6 +947,25 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
>> }
>> #endif
>>
>> +#ifndef clear_flush_young_ptes
>> +static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
>> + unsigned long addr, pte_t *ptep,
>> + unsigned int nr)
>> +{
>> + int young = 0;
>> +
>> + for (;;) {
>> + young |= ptep_clear_flush_young(vma, addr, ptep);
>> + if (--nr == 0)
>> + break;
>> + ptep++;
>> + addr += PAGE_SIZE;
>> + }
>> +
>> + return young;
>> +}
>> +#endif
>> +
>> /*
>> * On some architectures hardware does not set page access bit when accessing
>> * memory page, it is responsibility of software setting this bit. It brings
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index d6799afe1114..ec232165c47d 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
>> struct folio_referenced_arg *pra = arg;
>> DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
>> int ptes = 0, referenced = 0;
>> + unsigned int nr;
>>
>> while (page_vma_mapped_walk(&pvmw)) {
>> address = pvmw.address;
>> + nr = 1;
>>
>> if (vma->vm_flags & VM_LOCKED) {
>> ptes++;
>> @@ -874,9 +876,21 @@ static bool folio_referenced_one(struct folio *folio,
>> if (lru_gen_look_around(&pvmw))
>> referenced++;
>> } else if (pvmw.pte) {
>> + if (folio_test_large(folio)) {
>> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
>> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
>> + pte_t pteval = ptep_get(pvmw.pte);
>> +
>> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
>
> I do wish we could put this fiddly logic into a helper for each place in
> which we do this kind of 'end of the PTE table, maximum number of PTEs we
> could have' calculation.
Um, the logic is already simple, and I don't think adding a new helper
would improve readability. If other code needs to reuse this logic later,
we can factor it out into a helper at that point.
>> + }
>
> NIT but we're running into pretty long lines here.
OK. Will fix this.
>> +
>> + ptes += nr;
>> if (ptep_clear_flush_young_notify(vma, address,
>> - pvmw.pte))
>> + pvmw.pte, nr))
>> referenced++;
>
> I find this referenced logic weird; it seems like it should be a boolean,
> but that is outside the scope of your patch here :)
Right. Thanks for reviewing.