From: Chuanhua Han <hanchuanhua@oppo.com>
When a large folio is found in the swapcache, the current implementation
requires calling do_swap_page() nr_pages times, resulting in nr_pages
page faults. This patch opts to map the entire large folio at once to
minimize page faults. Additionally, redundant checks and early exits
for ARM64 MTE restoring are removed.
Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
Co-developed-by: Barry Song <v-songbaohua@oppo.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 22e7c33cc747..940fdbe69fa1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
+ int nr_pages = 1;
+ unsigned long page_idx = 0;
+ unsigned long address = vmf->address;
+ pte_t *ptep;
if (!pte_unmap_same(vmf))
goto out;
@@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_nomap;
}
+ ptep = vmf->pte;
+ if (folio_test_large(folio) && folio_test_swapcache(folio)) {
+ int nr = folio_nr_pages(folio);
+ unsigned long idx = folio_page_idx(folio, page);
+ unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
+ unsigned long folio_end = folio_start + nr * PAGE_SIZE;
+ pte_t *folio_ptep;
+ pte_t folio_pte;
+
+ if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
+ goto check_folio;
+ if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
+ goto check_folio;
+
+ folio_ptep = vmf->pte - idx;
+ folio_pte = ptep_get(folio_ptep);
+ if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+ swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+ goto check_folio;
+
+ page_idx = idx;
+ address = folio_start;
+ ptep = folio_ptep;
+ nr_pages = nr;
+ entry = folio->swap;
+ page = &folio->page;
+ }
+
+check_folio:
+
/*
* PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
* must never point at an anonymous page in the swapcache that is
@@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* We're already holding a reference on the page but haven't mapped it
* yet.
*/
- swap_free_nr(entry, 1);
+ swap_free_nr(entry, nr_pages);
if (should_try_to_free_swap(folio, vma, vmf->flags))
folio_free_swap(folio);
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+ folio_ref_add(folio, nr_pages - 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
/*
@@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* exclusivity.
*/
if (!folio_test_ksm(folio) &&
- (exclusive || folio_ref_count(folio) == 1)) {
+ (exclusive || (folio_ref_count(folio) == nr_pages &&
+ folio_nr_pages(folio) == nr_pages))) {
if (vmf->flags & FAULT_FLAG_WRITE) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
}
rmap_flags |= RMAP_EXCLUSIVE;
}
- flush_icache_page(vma, page);
+ flush_icache_pages(vma, page, nr_pages);
if (pte_swp_soft_dirty(vmf->orig_pte))
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(vmf->orig_pte))
pte = pte_mkuffd_wp(pte);
- vmf->orig_pte = pte;
+ vmf->orig_pte = pte_advance_pfn(pte, page_idx);
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, address);
folio_add_lru_vma(folio, vma);
} else {
- folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
(pte_write(pte) && !PageAnonExclusive(page)));
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
- arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
- pte, vmf->orig_pte, 1);
+ set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
+ arch_do_swap_page_nr(vma->vm_mm, vma, address,
+ pte, pte, nr_pages);
folio_unlock(folio);
if (folio != swapcache && swapcache) {
@@ -4291,7 +4327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
--
2.34.1
On 03.05.24 02:50, Barry Song wrote:
> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> page faults. This patch opts to map the entire large folio at once to
> minimize page faults. Additionally, redundant checks and early exits
> for ARM64 MTE restoring are removed.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 48 insertions(+), 12 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 22e7c33cc747..940fdbe69fa1 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> pte_t pte;
> vm_fault_t ret = 0;
> void *shadow = NULL;
> + int nr_pages = 1;
> + unsigned long page_idx = 0;
> + unsigned long address = vmf->address;
> + pte_t *ptep;
>
> if (!pte_unmap_same(vmf))
> goto out;
> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> goto out_nomap;
> }
>
> + ptep = vmf->pte;
> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> + int nr = folio_nr_pages(folio);
> + unsigned long idx = folio_page_idx(folio, page);
> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> + pte_t *folio_ptep;
> + pte_t folio_pte;
> +
> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> + goto check_folio;
> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> + goto check_folio;
> +
> + folio_ptep = vmf->pte - idx;
> + folio_pte = ptep_get(folio_ptep);
> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> + goto check_folio;
> +
> + page_idx = idx;
> + address = folio_start;
> + ptep = folio_ptep;
> + nr_pages = nr;
> + entry = folio->swap;
> + page = &folio->page;
> + }
> +
> +check_folio:
> +
> /*
> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> * must never point at an anonymous page in the swapcache that is
> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> * We're already holding a reference on the page but haven't mapped it
> * yet.
> */
> - swap_free_nr(entry, 1);
> + swap_free_nr(entry, nr_pages);
> if (should_try_to_free_swap(folio, vma, vmf->flags))
> folio_free_swap(folio);
>
> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> + folio_ref_add(folio, nr_pages - 1);
> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> pte = mk_pte(page, vma->vm_page_prot);
>
> /*
> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> * exclusivity.
> */
> if (!folio_test_ksm(folio) &&
> - (exclusive || folio_ref_count(folio) == 1)) {
> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> + folio_nr_pages(folio) == nr_pages))) {
> if (vmf->flags & FAULT_FLAG_WRITE) {
> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> vmf->flags &= ~FAULT_FLAG_WRITE;
I fail to convince myself that this change is correct, and if it is
correct, it's confusing (I think there is a dependency on
folio_free_swap() having been called and succeeding, such that we don't
have a folio that is in the swapcache at this point).
Why can't we move the folio_ref_add() after this check and just leave
the check as it is?
"folio_ref_count(folio) == 1" is as clear as it gets: we hold the single
reference, so we can do with this thing whatever we want: it's certainly
exclusive. No swapcache, no other people mapping it.
--
Cheers,
David / dhildenb
On Tue, May 7, 2024 at 12:05 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 03.05.24 02:50, Barry Song wrote:
> > From: Chuanhua Han <hanchuanhua@oppo.com>
> >
> > When a large folio is found in the swapcache, the current implementation
> > requires calling do_swap_page() nr_pages times, resulting in nr_pages
> > page faults. This patch opts to map the entire large folio at once to
> > minimize page faults. Additionally, redundant checks and early exits
> > for ARM64 MTE restoring are removed.
> >
> > Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> > Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > ---
> > mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> > 1 file changed, 48 insertions(+), 12 deletions(-)
> >
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 22e7c33cc747..940fdbe69fa1 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > pte_t pte;
> > vm_fault_t ret = 0;
> > void *shadow = NULL;
> > + int nr_pages = 1;
> > + unsigned long page_idx = 0;
> > + unsigned long address = vmf->address;
> > + pte_t *ptep;
> >
> > if (!pte_unmap_same(vmf))
> > goto out;
> > @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > goto out_nomap;
> > }
> >
> > + ptep = vmf->pte;
> > + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> > + int nr = folio_nr_pages(folio);
> > + unsigned long idx = folio_page_idx(folio, page);
> > + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> > + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> > + pte_t *folio_ptep;
> > + pte_t folio_pte;
> > +
> > + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> > + goto check_folio;
> > + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> > + goto check_folio;
> > +
> > + folio_ptep = vmf->pte - idx;
> > + folio_pte = ptep_get(folio_ptep);
> > + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> > + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> > + goto check_folio;
> > +
> > + page_idx = idx;
> > + address = folio_start;
> > + ptep = folio_ptep;
> > + nr_pages = nr;
> > + entry = folio->swap;
> > + page = &folio->page;
> > + }
> > +
> > +check_folio:
> > +
> > /*
> > * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> > * must never point at an anonymous page in the swapcache that is
> > @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > * We're already holding a reference on the page but haven't mapped it
> > * yet.
> > */
> > - swap_free_nr(entry, 1);
> > + swap_free_nr(entry, nr_pages);
> > if (should_try_to_free_swap(folio, vma, vmf->flags))
> > folio_free_swap(folio);
> >
> > - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> > - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> > + folio_ref_add(folio, nr_pages - 1);
> > + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> > + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> > pte = mk_pte(page, vma->vm_page_prot);
> >
> > /*
> > @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > * exclusivity.
> > */
> > if (!folio_test_ksm(folio) &&
> > - (exclusive || folio_ref_count(folio) == 1)) {
> > + (exclusive || (folio_ref_count(folio) == nr_pages &&
> > + folio_nr_pages(folio) == nr_pages))) {
> > if (vmf->flags & FAULT_FLAG_WRITE) {
> > pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> > vmf->flags &= ~FAULT_FLAG_WRITE;
>
> I fail to convince myself that this change is correct, and if it is
> correct, it's confusing (I think there is a dependency on
> folio_free_swap() having been called and succeeding, such that we don't
> have a folio that is in the swapcache at this point).
>
> Why can't we move the folio_ref_add() after this check and just leave
> the check as it is?
>
> "folio_ref_count(folio) == 1" is as clear as it gets: we hold the single
> reference, so we can do with this thing whatever we want: it's certainly
> exclusive. No swapcache, no other people mapping it.
Right.
I believe the code works correctly but is a bit confusing. As you said,
we might move folio_ref_add() after the folio_ref_count(folio) == 1 check.
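Something like the following, i.e. keeping the check as it is and only
taking the extra references afterwards (just a sketch of the idea,
untested):

	if (!folio_test_ksm(folio) &&
	    (exclusive || folio_ref_count(folio) == 1)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
			vmf->flags &= ~FAULT_FLAG_WRITE;
		}
		rmap_flags |= RMAP_EXCLUSIVE;
	}
	/* only now take the nr_pages - 1 extra references */
	folio_ref_add(folio, nr_pages - 1);
	flush_icache_pages(vma, page, nr_pages);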
>
>
> --
> Cheers,
>
> David / dhildenb
>
Thanks
Barry
On 03/05/2024 01:50, Barry Song wrote:
> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> page faults. This patch opts to map the entire large folio at once to
> minimize page faults. Additionally, redundant checks and early exits
> for ARM64 MTE restoring are removed.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
With the suggested changes below:
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 48 insertions(+), 12 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 22e7c33cc747..940fdbe69fa1 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> pte_t pte;
> vm_fault_t ret = 0;
> void *shadow = NULL;
> + int nr_pages = 1;
> + unsigned long page_idx = 0;
> + unsigned long address = vmf->address;
> + pte_t *ptep;
nit: Personally I'd prefer all these to get initialised just before the "if
(folio_test_large()..." block below. That way it is clear they are fresh (in case
any logic between here and there makes an adjustment) and it's clear that they
are only to be used after that block (the compiler will warn if using an
uninitialized value).
>
> if (!pte_unmap_same(vmf))
> goto out;
> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> goto out_nomap;
> }
>
> + ptep = vmf->pte;
> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> + int nr = folio_nr_pages(folio);
> + unsigned long idx = folio_page_idx(folio, page);
> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> + pte_t *folio_ptep;
> + pte_t folio_pte;
> +
> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> + goto check_folio;
> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> + goto check_folio;
> +
> + folio_ptep = vmf->pte - idx;
> + folio_pte = ptep_get(folio_ptep);
> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> + goto check_folio;
> +
> + page_idx = idx;
> + address = folio_start;
> + ptep = folio_ptep;
> + nr_pages = nr;
> + entry = folio->swap;
> + page = &folio->page;
> + }
> +
> +check_folio:
Is this still the correct label name, given the checks are now above the new
block? Perhaps "one_page" or something like that?
> +
> /*
> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> * must never point at an anonymous page in the swapcache that is
> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> * We're already holding a reference on the page but haven't mapped it
> * yet.
> */
> - swap_free_nr(entry, 1);
> + swap_free_nr(entry, nr_pages);
> if (should_try_to_free_swap(folio, vma, vmf->flags))
> folio_free_swap(folio);
>
> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> + folio_ref_add(folio, nr_pages - 1);
> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> pte = mk_pte(page, vma->vm_page_prot);
>
> /*
> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> * exclusivity.
> */
> if (!folio_test_ksm(folio) &&
> - (exclusive || folio_ref_count(folio) == 1)) {
> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> + folio_nr_pages(folio) == nr_pages))) {
I think in practice there is no change here? If nr_pages > 1 then the folio is
in the swapcache, so there is an extra ref on it? I agree with the change for
robustness' sake. Just checking my understanding.
> if (vmf->flags & FAULT_FLAG_WRITE) {
> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> vmf->flags &= ~FAULT_FLAG_WRITE;
> }
> rmap_flags |= RMAP_EXCLUSIVE;
> }
> - flush_icache_page(vma, page);
> + flush_icache_pages(vma, page, nr_pages);
> if (pte_swp_soft_dirty(vmf->orig_pte))
> pte = pte_mksoft_dirty(pte);
> if (pte_swp_uffd_wp(vmf->orig_pte))
> pte = pte_mkuffd_wp(pte);
> - vmf->orig_pte = pte;
> + vmf->orig_pte = pte_advance_pfn(pte, page_idx);
>
> /* ksm created a completely new copy */
> if (unlikely(folio != swapcache && swapcache)) {
> - folio_add_new_anon_rmap(folio, vma, vmf->address);
> + folio_add_new_anon_rmap(folio, vma, address);
> folio_add_lru_vma(folio, vma);
> } else {
> - folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> + folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
> rmap_flags);
> }
>
> VM_BUG_ON(!folio_test_anon(folio) ||
> (pte_write(pte) && !PageAnonExclusive(page)));
> - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> - arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
> - pte, vmf->orig_pte, 1);
> + set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
> + arch_do_swap_page_nr(vma->vm_mm, vma, address,
> + pte, pte, nr_pages);
>
> folio_unlock(folio);
> if (folio != swapcache && swapcache) {
> @@ -4291,7 +4327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> }
>
> /* No need to invalidate - it was non-present before */
> - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> + update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
> unlock:
> if (vmf->pte)
> pte_unmap_unlock(vmf->pte, vmf->ptl);
On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>
> On 03/05/2024 01:50, Barry Song wrote:
> > From: Chuanhua Han <hanchuanhua@oppo.com>
> >
> > When a large folio is found in the swapcache, the current implementation
> > requires calling do_swap_page() nr_pages times, resulting in nr_pages
> > page faults. This patch opts to map the entire large folio at once to
> > minimize page faults. Additionally, redundant checks and early exits
> > for ARM64 MTE restoring are removed.
> >
> > Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> > Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
>
> With the suggested changes below:
>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>
> > ---
> > mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> > 1 file changed, 48 insertions(+), 12 deletions(-)
> >
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 22e7c33cc747..940fdbe69fa1 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > pte_t pte;
> > vm_fault_t ret = 0;
> > void *shadow = NULL;
> > + int nr_pages = 1;
> > + unsigned long page_idx = 0;
> > + unsigned long address = vmf->address;
> > + pte_t *ptep;
>
> nit: Personally I'd prefer all these to get initialised just before the "if
> (folio_test_large()..." block below. That way it is clear they are fresh (incase
> any logic between here and there makes an adjustment) and its clear that they
> are only to be used after that block (the compiler will warn if using an
> uninitialized value).
Right. I agree this will make the code more readable.
>
> >
> > if (!pte_unmap_same(vmf))
> > goto out;
> > @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > goto out_nomap;
> > }
> >
> > + ptep = vmf->pte;
> > + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> > + int nr = folio_nr_pages(folio);
> > + unsigned long idx = folio_page_idx(folio, page);
> > + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> > + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> > + pte_t *folio_ptep;
> > + pte_t folio_pte;
> > +
> > + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> > + goto check_folio;
> > + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> > + goto check_folio;
> > +
> > + folio_ptep = vmf->pte - idx;
> > + folio_pte = ptep_get(folio_ptep);
> > + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> > + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> > + goto check_folio;
> > +
> > + page_idx = idx;
> > + address = folio_start;
> > + ptep = folio_ptep;
> > + nr_pages = nr;
> > + entry = folio->swap;
> > + page = &folio->page;
> > + }
> > +
> > +check_folio:
>
> Is this still the correct label name, given the checks are now above the new
> block? Perhaps "one_page" or something like that?
I'm not quite sure about this, as the code after a "one_page" label could still handle multiple pages.
On the other hand, it seems we really are checking the folio after "check_folio"
:-)
BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
/*
* Check under PT lock (to protect against concurrent fork() sharing
* the swap entry concurrently) for certainly exclusive pages.
*/
if (!folio_test_ksm(folio)) {
>
> > +
> > /*
> > * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> > * must never point at an anonymous page in the swapcache that is
> > @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > * We're already holding a reference on the page but haven't mapped it
> > * yet.
> > */
> > - swap_free_nr(entry, 1);
> > + swap_free_nr(entry, nr_pages);
> > if (should_try_to_free_swap(folio, vma, vmf->flags))
> > folio_free_swap(folio);
> >
> > - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> > - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> > + folio_ref_add(folio, nr_pages - 1);
> > + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> > + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> > pte = mk_pte(page, vma->vm_page_prot);
> >
> > /*
> > @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > * exclusivity.
> > */
> > if (!folio_test_ksm(folio) &&
> > - (exclusive || folio_ref_count(folio) == 1)) {
> > + (exclusive || (folio_ref_count(folio) == nr_pages &&
> > + folio_nr_pages(folio) == nr_pages))) {
>
> I think in practice there is no change here? If nr_pages > 1 then the folio is
> in the swapcache, so there is an extra ref on it? I agree with the change for
> robustness sake. Just checking my understanding.
This is the code deciding we can reuse (mkwrite) a folio in either of two cases:
1. we hit a small folio and we are the only one holding it;
2. we hit a large folio and we are the only one holding it.
Corner cases beyond those two seem hard to reason about. For example,
if we hit a large folio in the swapcache but can't map it entirely
(nr_pages == 1) due to a partial unmap, we would have folio_ref_count(folio)
== nr_pages == 1; in that case, without folio_nr_pages(folio) == nr_pages
we might end up doing mkwrite() on a single PTE within a partially unmapped
large folio. I'm not sure that is outright wrong, but it seems fragile and
hard to reason about.
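To spell out what the combined check amounts to, a sketch using the names
from the hunk above (mkwrite handling omitted):

	/*
	 * Reuse only when the swap pte was marked exclusive, or when every
	 * remaining reference to the folio is one of the nr_pages references
	 * this fault already holds:
	 *  - small folio: nr_pages == 1 == folio_nr_pages(), the usual
	 *    "we hold the single reference" case;
	 *  - large folio mapped in full: nr_pages == folio_nr_pages() and
	 *    folio_ref_count() == nr_pages, so nobody else holds it.
	 * The folio_nr_pages(folio) == nr_pages term keeps the refcount path
	 * from reusing a large folio that is only partially mapped here.
	 */
	if (!folio_test_ksm(folio) &&
	    (exclusive || (folio_ref_count(folio) == nr_pages &&
			   folio_nr_pages(folio) == nr_pages)))
		rmap_flags |= RMAP_EXCLUSIVE;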
>
> > if (vmf->flags & FAULT_FLAG_WRITE) {
> > pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> > vmf->flags &= ~FAULT_FLAG_WRITE;
> > }
> > rmap_flags |= RMAP_EXCLUSIVE;
> > }
> > - flush_icache_page(vma, page);
> > + flush_icache_pages(vma, page, nr_pages);
> > if (pte_swp_soft_dirty(vmf->orig_pte))
> > pte = pte_mksoft_dirty(pte);
> > if (pte_swp_uffd_wp(vmf->orig_pte))
> > pte = pte_mkuffd_wp(pte);
> > - vmf->orig_pte = pte;
> > + vmf->orig_pte = pte_advance_pfn(pte, page_idx);
> >
> > /* ksm created a completely new copy */
> > if (unlikely(folio != swapcache && swapcache)) {
> > - folio_add_new_anon_rmap(folio, vma, vmf->address);
> > + folio_add_new_anon_rmap(folio, vma, address);
> > folio_add_lru_vma(folio, vma);
> > } else {
> > - folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> > + folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
> > rmap_flags);
> > }
> >
> > VM_BUG_ON(!folio_test_anon(folio) ||
> > (pte_write(pte) && !PageAnonExclusive(page)));
> > - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> > - arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
> > - pte, vmf->orig_pte, 1);
> > + set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
> > + arch_do_swap_page_nr(vma->vm_mm, vma, address,
> > + pte, pte, nr_pages);
> >
> > folio_unlock(folio);
> > if (folio != swapcache && swapcache) {
> > @@ -4291,7 +4327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > }
> >
> > /* No need to invalidate - it was non-present before */
> > - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> > + update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
> > unlock:
> > if (vmf->pte)
> > pte_unmap_unlock(vmf->pte, vmf->ptl);
>
Thanks
Barry
On 04/05/2024 00:23, Barry Song wrote:
> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>>
>> On 03/05/2024 01:50, Barry Song wrote:
>>> From: Chuanhua Han <hanchuanhua@oppo.com>
>>>
>>> When a large folio is found in the swapcache, the current implementation
>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
>>> page faults. This patch opts to map the entire large folio at once to
>>> minimize page faults. Additionally, redundant checks and early exits
>>> for ARM64 MTE restoring are removed.
>>>
>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
>>
>> With the suggested changes below:
>>
>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>
>>> ---
>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
>>> 1 file changed, 48 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index 22e7c33cc747..940fdbe69fa1 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> pte_t pte;
>>> vm_fault_t ret = 0;
>>> void *shadow = NULL;
>>> + int nr_pages = 1;
>>> + unsigned long page_idx = 0;
>>> + unsigned long address = vmf->address;
>>> + pte_t *ptep;
>>
>> nit: Personally I'd prefer all these to get initialised just before the "if
>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
>> any logic between here and there makes an adjustment) and its clear that they
>> are only to be used after that block (the compiler will warn if using an
>> uninitialized value).
>
> right. I agree this will make the code more readable.
>
>>
>>>
>>> if (!pte_unmap_same(vmf))
>>> goto out;
>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> goto out_nomap;
>>> }
>>>
>>> + ptep = vmf->pte;
>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
>>> + int nr = folio_nr_pages(folio);
>>> + unsigned long idx = folio_page_idx(folio, page);
>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
>>> + pte_t *folio_ptep;
>>> + pte_t folio_pte;
>>> +
>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
>>> + goto check_folio;
>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
>>> + goto check_folio;
>>> +
>>> + folio_ptep = vmf->pte - idx;
>>> + folio_pte = ptep_get(folio_ptep);
>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
>>> + goto check_folio;
>>> +
>>> + page_idx = idx;
>>> + address = folio_start;
>>> + ptep = folio_ptep;
>>> + nr_pages = nr;
>>> + entry = folio->swap;
>>> + page = &folio->page;
>>> + }
>>> +
>>> +check_folio:
>>
>> Is this still the correct label name, given the checks are now above the new
>> block? Perhaps "one_page" or something like that?
>
> not quite sure about this, as the code after one_page can be multiple_pages.
> On the other hand, it seems we are really checking folio after "check_folio"
> :-)
Yeah fair enough. Ignore my comment.
>
>
> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
>
> /*
> * Check under PT lock (to protect against concurrent fork() sharing
> * the swap entry concurrently) for certainly exclusive pages.
> */
> if (!folio_test_ksm(folio)) {
>
>
>>
>>> +
>>> /*
>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>>> * must never point at an anonymous page in the swapcache that is
>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> * We're already holding a reference on the page but haven't mapped it
>>> * yet.
>>> */
>>> - swap_free_nr(entry, 1);
>>> + swap_free_nr(entry, nr_pages);
>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
>>> folio_free_swap(folio);
>>>
>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
>>> + folio_ref_add(folio, nr_pages - 1);
>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>>> pte = mk_pte(page, vma->vm_page_prot);
>>>
>>> /*
>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> * exclusivity.
>>> */
>>> if (!folio_test_ksm(folio) &&
>>> - (exclusive || folio_ref_count(folio) == 1)) {
>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
>>> + folio_nr_pages(folio) == nr_pages))) {
>>
>> I think in practice there is no change here? If nr_pages > 1 then the folio is
>> in the swapcache, so there is an extra ref on it? I agree with the change for
>> robustness sake. Just checking my understanding.
>
> This is the code showing we are reusing/(mkwrite) a folio either
> 1. we meet a small folio and we are the only one hitting the small folio
> 2. we meet a large folio and we are the only one hitting the large folio
>
> any corner cases besides the above two seems difficult. for example,
>
> while we hit a large folio in swapcache but if we can't entirely map it
> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> == nr_pages == 1, in this case, lacking folio_nr_pages(folio) == nr_pages
> might lead to mkwrite() on a single pte within a partially unmapped large
> folio. not quite sure this is wrong, but seems buggy and arduous.
>
>>
>>> if (vmf->flags & FAULT_FLAG_WRITE) {
>>> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
>>> vmf->flags &= ~FAULT_FLAG_WRITE;
>>> }
>>> rmap_flags |= RMAP_EXCLUSIVE;
>>> }
>>> - flush_icache_page(vma, page);
>>> + flush_icache_pages(vma, page, nr_pages);
>>> if (pte_swp_soft_dirty(vmf->orig_pte))
>>> pte = pte_mksoft_dirty(pte);
>>> if (pte_swp_uffd_wp(vmf->orig_pte))
>>> pte = pte_mkuffd_wp(pte);
>>> - vmf->orig_pte = pte;
>>> + vmf->orig_pte = pte_advance_pfn(pte, page_idx);
>>>
>>> /* ksm created a completely new copy */
>>> if (unlikely(folio != swapcache && swapcache)) {
>>> - folio_add_new_anon_rmap(folio, vma, vmf->address);
>>> + folio_add_new_anon_rmap(folio, vma, address);
>>> folio_add_lru_vma(folio, vma);
>>> } else {
>>> - folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
>>> + folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
>>> rmap_flags);
>>> }
>>>
>>> VM_BUG_ON(!folio_test_anon(folio) ||
>>> (pte_write(pte) && !PageAnonExclusive(page)));
>>> - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
>>> - arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
>>> - pte, vmf->orig_pte, 1);
>>> + set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
>>> + arch_do_swap_page_nr(vma->vm_mm, vma, address,
>>> + pte, pte, nr_pages);
>>>
>>> folio_unlock(folio);
>>> if (folio != swapcache && swapcache) {
>>> @@ -4291,7 +4327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> }
>>>
>>> /* No need to invalidate - it was non-present before */
>>> - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
>>> + update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
>>> unlock:
>>> if (vmf->pte)
>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>
>
> Thanks
> Barry
On 04.05.24 01:23, Barry Song wrote:
> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>>
>> On 03/05/2024 01:50, Barry Song wrote:
>>> From: Chuanhua Han <hanchuanhua@oppo.com>
>>>
>>> When a large folio is found in the swapcache, the current implementation
>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
>>> page faults. This patch opts to map the entire large folio at once to
>>> minimize page faults. Additionally, redundant checks and early exits
>>> for ARM64 MTE restoring are removed.
>>>
>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
>>
>> With the suggested changes below:
>>
>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>
>>> ---
>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
>>> 1 file changed, 48 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index 22e7c33cc747..940fdbe69fa1 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> pte_t pte;
>>> vm_fault_t ret = 0;
>>> void *shadow = NULL;
>>> + int nr_pages = 1;
>>> + unsigned long page_idx = 0;
>>> + unsigned long address = vmf->address;
>>> + pte_t *ptep;
>>
>> nit: Personally I'd prefer all these to get initialised just before the "if
>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
>> any logic between here and there makes an adjustment) and its clear that they
>> are only to be used after that block (the compiler will warn if using an
>> uninitialized value).
>
> right. I agree this will make the code more readable.
>
>>
>>>
>>> if (!pte_unmap_same(vmf))
>>> goto out;
>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> goto out_nomap;
>>> }
>>>
>>> + ptep = vmf->pte;
>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
>>> + int nr = folio_nr_pages(folio);
>>> + unsigned long idx = folio_page_idx(folio, page);
>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
>>> + pte_t *folio_ptep;
>>> + pte_t folio_pte;
>>> +
>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
>>> + goto check_folio;
>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
>>> + goto check_folio;
>>> +
>>> + folio_ptep = vmf->pte - idx;
>>> + folio_pte = ptep_get(folio_ptep);
>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
>>> + goto check_folio;
>>> +
>>> + page_idx = idx;
>>> + address = folio_start;
>>> + ptep = folio_ptep;
>>> + nr_pages = nr;
>>> + entry = folio->swap;
>>> + page = &folio->page;
>>> + }
>>> +
>>> +check_folio:
>>
>> Is this still the correct label name, given the checks are now above the new
>> block? Perhaps "one_page" or something like that?
>
> not quite sure about this, as the code after one_page can be multiple_pages.
> On the other hand, it seems we are really checking folio after "check_folio"
> :-)
>
>
> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
>
> /*
> * Check under PT lock (to protect against concurrent fork() sharing
> * the swap entry concurrently) for certainly exclusive pages.
> */
> if (!folio_test_ksm(folio)) {
>
>
>>
>>> +
>>> /*
>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>>> * must never point at an anonymous page in the swapcache that is
>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> * We're already holding a reference on the page but haven't mapped it
>>> * yet.
>>> */
>>> - swap_free_nr(entry, 1);
>>> + swap_free_nr(entry, nr_pages);
>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
>>> folio_free_swap(folio);
>>>
>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
>>> + folio_ref_add(folio, nr_pages - 1);
>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>>> pte = mk_pte(page, vma->vm_page_prot);
>>>
>>> /*
>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>> * exclusivity.
>>> */
>>> if (!folio_test_ksm(folio) &&
>>> - (exclusive || folio_ref_count(folio) == 1)) {
>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
>>> + folio_nr_pages(folio) == nr_pages))) {
>>
>> I think in practice there is no change here? If nr_pages > 1 then the folio is
>> in the swapcache, so there is an extra ref on it? I agree with the change for
>> robustness sake. Just checking my understanding.
>
> This is the code showing we are reusing/(mkwrite) a folio either
> 1. we meet a small folio and we are the only one hitting the small folio
> 2. we meet a large folio and we are the only one hitting the large folio
>
> any corner cases besides the above two seems difficult. for example,
>
> while we hit a large folio in swapcache but if we can't entirely map it
> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> == nr_pages == 1
No, there would be other references from the swapcache and
folio_ref_count(folio) > 1. See my other reply.
--
Cheers,
David / dhildenb
On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 04.05.24 01:23, Barry Song wrote:
> > On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
> >>
> >> On 03/05/2024 01:50, Barry Song wrote:
> >>> From: Chuanhua Han <hanchuanhua@oppo.com>
> >>>
> >>> When a large folio is found in the swapcache, the current implementation
> >>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> >>> page faults. This patch opts to map the entire large folio at once to
> >>> minimize page faults. Additionally, redundant checks and early exits
> >>> for ARM64 MTE restoring are removed.
> >>>
> >>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> >>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> >>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> >>
> >> With the suggested changes below:
> >>
> >> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> >>
> >>> ---
> >>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> >>> 1 file changed, 48 insertions(+), 12 deletions(-)
> >>>
> >>> diff --git a/mm/memory.c b/mm/memory.c
> >>> index 22e7c33cc747..940fdbe69fa1 100644
> >>> --- a/mm/memory.c
> >>> +++ b/mm/memory.c
> >>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>> pte_t pte;
> >>> vm_fault_t ret = 0;
> >>> void *shadow = NULL;
> >>> + int nr_pages = 1;
> >>> + unsigned long page_idx = 0;
> >>> + unsigned long address = vmf->address;
> >>> + pte_t *ptep;
> >>
> >> nit: Personally I'd prefer all these to get initialised just before the "if
> >> (folio_test_large()..." block below. That way it is clear they are fresh (incase
> >> any logic between here and there makes an adjustment) and its clear that they
> >> are only to be used after that block (the compiler will warn if using an
> >> uninitialized value).
> >
> > right. I agree this will make the code more readable.
> >
> >>
> >>>
> >>> if (!pte_unmap_same(vmf))
> >>> goto out;
> >>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>> goto out_nomap;
> >>> }
> >>>
> >>> + ptep = vmf->pte;
> >>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> >>> + int nr = folio_nr_pages(folio);
> >>> + unsigned long idx = folio_page_idx(folio, page);
> >>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> >>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> >>> + pte_t *folio_ptep;
> >>> + pte_t folio_pte;
> >>> +
> >>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> >>> + goto check_folio;
> >>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> >>> + goto check_folio;
> >>> +
> >>> + folio_ptep = vmf->pte - idx;
> >>> + folio_pte = ptep_get(folio_ptep);
> >>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> >>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> >>> + goto check_folio;
> >>> +
> >>> + page_idx = idx;
> >>> + address = folio_start;
> >>> + ptep = folio_ptep;
> >>> + nr_pages = nr;
> >>> + entry = folio->swap;
> >>> + page = &folio->page;
> >>> + }
> >>> +
> >>> +check_folio:
> >>
> >> Is this still the correct label name, given the checks are now above the new
> >> block? Perhaps "one_page" or something like that?
> >
> > not quite sure about this, as the code after one_page can be multiple_pages.
> > On the other hand, it seems we are really checking folio after "check_folio"
> > :-)
> >
> >
> > BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> > BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
> >
> > /*
> > * Check under PT lock (to protect against concurrent fork() sharing
> > * the swap entry concurrently) for certainly exclusive pages.
> > */
> > if (!folio_test_ksm(folio)) {
> >
> >
> >>
> >>> +
> >>> /*
> >>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> >>> * must never point at an anonymous page in the swapcache that is
> >>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>> * We're already holding a reference on the page but haven't mapped it
> >>> * yet.
> >>> */
> >>> - swap_free_nr(entry, 1);
> >>> + swap_free_nr(entry, nr_pages);
> >>> if (should_try_to_free_swap(folio, vma, vmf->flags))
> >>> folio_free_swap(folio);
> >>>
> >>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> >>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> >>> + folio_ref_add(folio, nr_pages - 1);
> >>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> >>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> >>> pte = mk_pte(page, vma->vm_page_prot);
> >>>
> >>> /*
> >>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>> * exclusivity.
> >>> */
> >>> if (!folio_test_ksm(folio) &&
> >>> - (exclusive || folio_ref_count(folio) == 1)) {
> >>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> >>> + folio_nr_pages(folio) == nr_pages))) {
> >>
> >> I think in practice there is no change here? If nr_pages > 1 then the folio is
> >> in the swapcache, so there is an extra ref on it? I agree with the change for
> >> robustness sake. Just checking my understanding.
> >
> > This is the code showing we are reusing/(mkwrite) a folio either
> > 1. we meet a small folio and we are the only one hitting the small folio
> > 2. we meet a large folio and we are the only one hitting the large folio
> >
> > any corner cases besides the above two seems difficult. for example,
> >
> > while we hit a large folio in swapcache but if we can't entirely map it
> > (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> > == nr_pages == 1
>
> No, there would be other references from the swapcache and
> folio_ref_count(folio) > 1. See my other reply.
Right. It can be made clearer by:
@@ -4263,7 +4264,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (should_try_to_free_swap(folio, vma, vmf->flags))
folio_free_swap(folio);
- folio_ref_add(folio, nr_pages - 1);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
@@ -4275,14 +4275,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* exclusivity.
*/
if (!folio_test_ksm(folio) &&
- (exclusive || (folio_ref_count(folio) == nr_pages &&
- folio_nr_pages(folio) == nr_pages))) {
+ (exclusive || folio_ref_count(folio) == 1)) {
if (vmf->flags & FAULT_FLAG_WRITE) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
}
rmap_flags |= RMAP_EXCLUSIVE;
}
+ folio_ref_add(folio, nr_pages - 1);
flush_icache_pages(vma, page, nr_pages);
if (pte_swp_soft_dirty(vmf->orig_pte))
pte = pte_mksoft_dirty(pte);
>
> --
> Cheers,
>
> David / dhildenb
>
On Tue, May 7, 2024 at 12:38 AM Barry Song <21cnbao@gmail.com> wrote:
>
> On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
> >
> > On 04.05.24 01:23, Barry Song wrote:
> > > On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
> > >>
> > >> On 03/05/2024 01:50, Barry Song wrote:
> > >>> From: Chuanhua Han <hanchuanhua@oppo.com>
> > >>>
> > >>> When a large folio is found in the swapcache, the current implementation
> > >>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> > >>> page faults. This patch opts to map the entire large folio at once to
> > >>> minimize page faults. Additionally, redundant checks and early exits
> > >>> for ARM64 MTE restoring are removed.
> > >>>
> > >>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> > >>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> > >>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > >>
> > >> With the suggested changes below:
> > >>
> > >> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> > >>
> > >>> ---
> > >>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> > >>> 1 file changed, 48 insertions(+), 12 deletions(-)
> > >>>
> > >>> diff --git a/mm/memory.c b/mm/memory.c
> > >>> index 22e7c33cc747..940fdbe69fa1 100644
> > >>> --- a/mm/memory.c
> > >>> +++ b/mm/memory.c
> > >>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > >>> pte_t pte;
> > >>> vm_fault_t ret = 0;
> > >>> void *shadow = NULL;
> > >>> + int nr_pages = 1;
> > >>> + unsigned long page_idx = 0;
> > >>> + unsigned long address = vmf->address;
> > >>> + pte_t *ptep;
> > >>
> > >> nit: Personally I'd prefer all these to get initialised just before the "if
> > >> (folio_test_large()..." block below. That way it is clear they are fresh (incase
> > >> any logic between here and there makes an adjustment) and its clear that they
> > >> are only to be used after that block (the compiler will warn if using an
> > >> uninitialized value).
> > >
> > > right. I agree this will make the code more readable.
> > >
> > >>
> > >>>
> > >>> if (!pte_unmap_same(vmf))
> > >>> goto out;
> > >>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > >>> goto out_nomap;
> > >>> }
> > >>>
> > >>> + ptep = vmf->pte;
> > >>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> > >>> + int nr = folio_nr_pages(folio);
> > >>> + unsigned long idx = folio_page_idx(folio, page);
> > >>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> > >>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> > >>> + pte_t *folio_ptep;
> > >>> + pte_t folio_pte;
> > >>> +
> > >>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> > >>> + goto check_folio;
> > >>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> > >>> + goto check_folio;
> > >>> +
> > >>> + folio_ptep = vmf->pte - idx;
> > >>> + folio_pte = ptep_get(folio_ptep);
> > >>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> > >>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> > >>> + goto check_folio;
> > >>> +
> > >>> + page_idx = idx;
> > >>> + address = folio_start;
> > >>> + ptep = folio_ptep;
> > >>> + nr_pages = nr;
> > >>> + entry = folio->swap;
> > >>> + page = &folio->page;
> > >>> + }
> > >>> +
> > >>> +check_folio:
> > >>
> > >> Is this still the correct label name, given the checks are now above the new
> > >> block? Perhaps "one_page" or something like that?
> > >
> > > not quite sure about this, as the code after one_page can be multiple_pages.
> > > On the other hand, it seems we are really checking folio after "check_folio"
> > > :-)
> > >
> > >
> > > BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> > > BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
> > >
> > > /*
> > > * Check under PT lock (to protect against concurrent fork() sharing
> > > * the swap entry concurrently) for certainly exclusive pages.
> > > */
> > > if (!folio_test_ksm(folio)) {
> > >
> > >
> > >>
> > >>> +
> > >>> /*
> > >>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> > >>> * must never point at an anonymous page in the swapcache that is
> > >>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > >>> * We're already holding a reference on the page but haven't mapped it
> > >>> * yet.
> > >>> */
> > >>> - swap_free_nr(entry, 1);
> > >>> + swap_free_nr(entry, nr_pages);
> > >>> if (should_try_to_free_swap(folio, vma, vmf->flags))
> > >>> folio_free_swap(folio);
> > >>>
> > >>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> > >>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> > >>> + folio_ref_add(folio, nr_pages - 1);
> > >>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> > >>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> > >>> pte = mk_pte(page, vma->vm_page_prot);
> > >>>
> > >>> /*
> > >>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > >>> * exclusivity.
> > >>> */
> > >>> if (!folio_test_ksm(folio) &&
> > >>> - (exclusive || folio_ref_count(folio) == 1)) {
> > >>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> > >>> + folio_nr_pages(folio) == nr_pages))) {
> > >>
> > >> I think in practice there is no change here? If nr_pages > 1 then the folio is
> > >> in the swapcache, so there is an extra ref on it? I agree with the change for
> > >> robustness sake. Just checking my understanding.
> > >
> > > This is the code showing we are reusing/(mkwrite) a folio either
> > > 1. we meet a small folio and we are the only one hitting the small folio
> > > 2. we meet a large folio and we are the only one hitting the large folio
> > >
> > > any corner cases besides the above two seems difficult. for example,
> > >
> > > while we hit a large folio in swapcache but if we can't entirely map it
> > > (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> > > == nr_pages == 1
> >
> > No, there would be other references from the swapcache and
> > folio_ref_count(folio) > 1. See my other reply.
>
> right. can be clearer by:
Wait, do we still need folio_nr_pages(folio) == nr_pages even if we use
folio_ref_count(folio) == 1 and move folio_ref_add(folio, nr_pages - 1)
after the check?
One case: we have a large folio with 16 PTEs and we unmap 15 of the swap
PTE entries, so only one swap entry is left. Then we hit the large folio
in the swapcache, but since only one PTE remains we map only that one PTE.
Without folio_nr_pages(folio) == nr_pages we would reuse the large folio
for that single PTE; with it, do_wp_page() would migrate the large folio
to a small one on the next write fault?
It's 1AM and I'm tired and sleepy, so I'm not quite sure I'm correct.
I look forward to seeing your reply tomorrow morning :-)
>
> @@ -4263,7 +4264,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> if (should_try_to_free_swap(folio, vma, vmf->flags))
> folio_free_swap(folio);
>
> - folio_ref_add(folio, nr_pages - 1);
> add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> pte = mk_pte(page, vma->vm_page_prot);
> @@ -4275,14 +4275,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> * exclusivity.
> */
> if (!folio_test_ksm(folio) &&
> - (exclusive || (folio_ref_count(folio) == nr_pages &&
> - folio_nr_pages(folio) == nr_pages))) {
> + (exclusive || folio_ref_count(folio) == 1)) {
> if (vmf->flags & FAULT_FLAG_WRITE) {
> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> vmf->flags &= ~FAULT_FLAG_WRITE;
> }
> rmap_flags |= RMAP_EXCLUSIVE;
> }
> + folio_ref_add(folio, nr_pages - 1);
> flush_icache_pages(vma, page, nr_pages);
> if (pte_swp_soft_dirty(vmf->orig_pte))
> pte = pte_mksoft_dirty(pte);
>
>
> >
> > --
> > Cheers,
> >
> > David / dhildenb
> >
On 06.05.24 14:58, Barry Song wrote:
> On Tue, May 7, 2024 at 12:38 AM Barry Song <21cnbao@gmail.com> wrote:
>>
>> On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
>>>
>>> On 04.05.24 01:23, Barry Song wrote:
>>>> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>>>>>
>>>>> On 03/05/2024 01:50, Barry Song wrote:
>>>>>> From: Chuanhua Han <hanchuanhua@oppo.com>
>>>>>>
>>>>>> When a large folio is found in the swapcache, the current implementation
>>>>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
>>>>>> page faults. This patch opts to map the entire large folio at once to
>>>>>> minimize page faults. Additionally, redundant checks and early exits
>>>>>> for ARM64 MTE restoring are removed.
>>>>>>
>>>>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
>>>>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
>>>>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
>>>>>
>>>>> With the suggested changes below:
>>>>>
>>>>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>>>>
>>>>>> ---
>>>>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
>>>>>> 1 file changed, 48 insertions(+), 12 deletions(-)
>>>>>>
>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>> index 22e7c33cc747..940fdbe69fa1 100644
>>>>>> --- a/mm/memory.c
>>>>>> +++ b/mm/memory.c
>>>>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>> pte_t pte;
>>>>>> vm_fault_t ret = 0;
>>>>>> void *shadow = NULL;
>>>>>> + int nr_pages = 1;
>>>>>> + unsigned long page_idx = 0;
>>>>>> + unsigned long address = vmf->address;
>>>>>> + pte_t *ptep;
>>>>>
>>>>> nit: Personally I'd prefer all these to get initialised just before the "if
>>>>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
>>>>> any logic between here and there makes an adjustment) and its clear that they
>>>>> are only to be used after that block (the compiler will warn if using an
>>>>> uninitialized value).
>>>>
>>>> right. I agree this will make the code more readable.
>>>>
>>>>>
>>>>>>
>>>>>> if (!pte_unmap_same(vmf))
>>>>>> goto out;
>>>>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>> goto out_nomap;
>>>>>> }
>>>>>>
>>>>>> + ptep = vmf->pte;
>>>>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
>>>>>> + int nr = folio_nr_pages(folio);
>>>>>> + unsigned long idx = folio_page_idx(folio, page);
>>>>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
>>>>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
>>>>>> + pte_t *folio_ptep;
>>>>>> + pte_t folio_pte;
>>>>>> +
>>>>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
>>>>>> + goto check_folio;
>>>>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
>>>>>> + goto check_folio;
>>>>>> +
>>>>>> + folio_ptep = vmf->pte - idx;
>>>>>> + folio_pte = ptep_get(folio_ptep);
>>>>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
>>>>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
>>>>>> + goto check_folio;
>>>>>> +
>>>>>> + page_idx = idx;
>>>>>> + address = folio_start;
>>>>>> + ptep = folio_ptep;
>>>>>> + nr_pages = nr;
>>>>>> + entry = folio->swap;
>>>>>> + page = &folio->page;
>>>>>> + }
>>>>>> +
>>>>>> +check_folio:
>>>>>
>>>>> Is this still the correct label name, given the checks are now above the new
>>>>> block? Perhaps "one_page" or something like that?
>>>>
>>>> not quite sure about this, as the code after one_page can be multiple_pages.
>>>> On the other hand, it seems we are really checking folio after "check_folio"
>>>> :-)
>>>>
>>>>
>>>> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
>>>> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
>>>>
>>>> /*
>>>> * Check under PT lock (to protect against concurrent fork() sharing
>>>> * the swap entry concurrently) for certainly exclusive pages.
>>>> */
>>>> if (!folio_test_ksm(folio)) {
>>>>
>>>>
>>>>>
>>>>>> +
>>>>>> /*
>>>>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>>>>>> * must never point at an anonymous page in the swapcache that is
>>>>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>> * We're already holding a reference on the page but haven't mapped it
>>>>>> * yet.
>>>>>> */
>>>>>> - swap_free_nr(entry, 1);
>>>>>> + swap_free_nr(entry, nr_pages);
>>>>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
>>>>>> folio_free_swap(folio);
>>>>>>
>>>>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
>>>>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
>>>>>> + folio_ref_add(folio, nr_pages - 1);
>>>>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
>>>>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>>>>>> pte = mk_pte(page, vma->vm_page_prot);
>>>>>>
>>>>>> /*
>>>>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>> * exclusivity.
>>>>>> */
>>>>>> if (!folio_test_ksm(folio) &&
>>>>>> - (exclusive || folio_ref_count(folio) == 1)) {
>>>>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
>>>>>> + folio_nr_pages(folio) == nr_pages))) {
>>>>>
>>>>> I think in practice there is no change here? If nr_pages > 1 then the folio is
>>>>> in the swapcache, so there is an extra ref on it? I agree with the change for
>>>>> robustness sake. Just checking my understanding.
>>>>
>>>> This is the code showing we are reusing/(mkwrite) a folio either
>>>> 1. we meet a small folio and we are the only one hitting the small folio
>>>> 2. we meet a large folio and we are the only one hitting the large folio
>>>>
>>>> any corner cases besides the above two seems difficult. for example,
>>>>
>>>> while we hit a large folio in swapcache but if we can't entirely map it
>>>> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
>>>> == nr_pages == 1
>>>
>>> No, there would be other references from the swapcache and
>>> folio_ref_count(folio) > 1. See my other reply.
>>
>> right. can be clearer by:
>
> Wait, do we still need folio_nr_pages(folio) == nr_pages even if we use
> folio_ref_count(folio) == 1 and moving folio_ref_add(folio, nr_pages - 1)?
I don't think that we will "need" it.
>
> one case is that we have a large folio with 16 PTEs, and we unmap
> 15 swap PTE entries, thus we have only one swap entry left. Then
> we hit the large folio in swapcache. but we have only one PTE thus we will
> map only one PTE. lacking folio_nr_pages(folio) == nr_pages, we reuse the
> large folio for one PTE. with it, do_wp_page() will migrate the large
> folio to a small one?
We will set PAE bit and do_wp_page() will unconditionally reuse that page.
Note that this is the same as if we had pte_swp_exclusive() set and
would have run into "exclusive=true" here.
If we'd want a similar "optimization" as we have in
wp_can_reuse_anon_folio(), you'd want something like
exclusive || (folio_ref_count(folio) == 1 &&
(!folio_test_large(folio) || nr_pages > 1)
... but I am not sure if that is really worth the complexity here.
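Spelled out in context, that would look something like this (just a sketch
to illustrate the condition, and it assumes folio_ref_count() is sampled
before folio_ref_add(folio, nr_pages - 1) bumps the refcount for the batch):

	if (!folio_test_ksm(folio) &&
	    (exclusive || (folio_ref_count(folio) == 1 &&
			   (!folio_test_large(folio) || nr_pages > 1)))) {
		/*
		 * Sketch only: reuse if we are certainly exclusive, or if we
		 * hold the only reference and are either mapping a small
		 * folio or batch-mapping the whole large folio in one go.
		 */
		if (vmf->flags & FAULT_FLAG_WRITE) {
			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
			vmf->flags &= ~FAULT_FLAG_WRITE;
		}
		rmap_flags |= RMAP_EXCLUSIVE;
	}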
>
> 1AM, tired and sleepy. not quite sure I am correct.
> I look forward to seeing your reply tomorrow morning :-)
Heh, no need to dream about this ;)
--
Cheers,
David / dhildenb
On Tue, May 7, 2024 at 1:16 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 06.05.24 14:58, Barry Song wrote:
> > On Tue, May 7, 2024 at 12:38 AM Barry Song <21cnbao@gmail.com> wrote:
> >>
> >> On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
> >>>
> >>> On 04.05.24 01:23, Barry Song wrote:
> >>>> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
> >>>>>
> >>>>> On 03/05/2024 01:50, Barry Song wrote:
> >>>>>> From: Chuanhua Han <hanchuanhua@oppo.com>
> >>>>>>
> >>>>>> When a large folio is found in the swapcache, the current implementation
> >>>>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> >>>>>> page faults. This patch opts to map the entire large folio at once to
> >>>>>> minimize page faults. Additionally, redundant checks and early exits
> >>>>>> for ARM64 MTE restoring are removed.
> >>>>>>
> >>>>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> >>>>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> >>>>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> >>>>>
> >>>>> With the suggested changes below:
> >>>>>
> >>>>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> >>>>>
> >>>>>> ---
> >>>>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> >>>>>> 1 file changed, 48 insertions(+), 12 deletions(-)
> >>>>>>
> >>>>>> diff --git a/mm/memory.c b/mm/memory.c
> >>>>>> index 22e7c33cc747..940fdbe69fa1 100644
> >>>>>> --- a/mm/memory.c
> >>>>>> +++ b/mm/memory.c
> >>>>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>> pte_t pte;
> >>>>>> vm_fault_t ret = 0;
> >>>>>> void *shadow = NULL;
> >>>>>> + int nr_pages = 1;
> >>>>>> + unsigned long page_idx = 0;
> >>>>>> + unsigned long address = vmf->address;
> >>>>>> + pte_t *ptep;
> >>>>>
> >>>>> nit: Personally I'd prefer all these to get initialised just before the "if
> >>>>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
> >>>>> any logic between here and there makes an adjustment) and its clear that they
> >>>>> are only to be used after that block (the compiler will warn if using an
> >>>>> uninitialized value).
> >>>>
> >>>> right. I agree this will make the code more readable.
> >>>>
> >>>>>
> >>>>>>
> >>>>>> if (!pte_unmap_same(vmf))
> >>>>>> goto out;
> >>>>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>> goto out_nomap;
> >>>>>> }
> >>>>>>
> >>>>>> + ptep = vmf->pte;
> >>>>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> >>>>>> + int nr = folio_nr_pages(folio);
> >>>>>> + unsigned long idx = folio_page_idx(folio, page);
> >>>>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> >>>>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> >>>>>> + pte_t *folio_ptep;
> >>>>>> + pte_t folio_pte;
> >>>>>> +
> >>>>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> >>>>>> + goto check_folio;
> >>>>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> >>>>>> + goto check_folio;
> >>>>>> +
> >>>>>> + folio_ptep = vmf->pte - idx;
> >>>>>> + folio_pte = ptep_get(folio_ptep);
> >>>>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> >>>>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> >>>>>> + goto check_folio;
> >>>>>> +
> >>>>>> + page_idx = idx;
> >>>>>> + address = folio_start;
> >>>>>> + ptep = folio_ptep;
> >>>>>> + nr_pages = nr;
> >>>>>> + entry = folio->swap;
> >>>>>> + page = &folio->page;
> >>>>>> + }
> >>>>>> +
> >>>>>> +check_folio:
> >>>>>
> >>>>> Is this still the correct label name, given the checks are now above the new
> >>>>> block? Perhaps "one_page" or something like that?
> >>>>
> >>>> not quite sure about this, as the code after one_page can be multiple_pages.
> >>>> On the other hand, it seems we are really checking folio after "check_folio"
> >>>> :-)
> >>>>
> >>>>
> >>>> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> >>>> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
> >>>>
> >>>> /*
> >>>> * Check under PT lock (to protect against concurrent fork() sharing
> >>>> * the swap entry concurrently) for certainly exclusive pages.
> >>>> */
> >>>> if (!folio_test_ksm(folio)) {
> >>>>
> >>>>
> >>>>>
> >>>>>> +
> >>>>>> /*
> >>>>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> >>>>>> * must never point at an anonymous page in the swapcache that is
> >>>>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>> * We're already holding a reference on the page but haven't mapped it
> >>>>>> * yet.
> >>>>>> */
> >>>>>> - swap_free_nr(entry, 1);
> >>>>>> + swap_free_nr(entry, nr_pages);
> >>>>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
> >>>>>> folio_free_swap(folio);
> >>>>>>
> >>>>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> >>>>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> >>>>>> + folio_ref_add(folio, nr_pages - 1);
> >>>>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> >>>>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> >>>>>> pte = mk_pte(page, vma->vm_page_prot);
> >>>>>>
> >>>>>> /*
> >>>>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>> * exclusivity.
> >>>>>> */
> >>>>>> if (!folio_test_ksm(folio) &&
> >>>>>> - (exclusive || folio_ref_count(folio) == 1)) {
> >>>>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> >>>>>> + folio_nr_pages(folio) == nr_pages))) {
> >>>>>
> >>>>> I think in practice there is no change here? If nr_pages > 1 then the folio is
> >>>>> in the swapcache, so there is an extra ref on it? I agree with the change for
> >>>>> robustness sake. Just checking my understanding.
> >>>>
> >>>> This is the code showing we are reusing/(mkwrite) a folio either
> >>>> 1. we meet a small folio and we are the only one hitting the small folio
> >>>> 2. we meet a large folio and we are the only one hitting the large folio
> >>>>
> >>>> any corner cases besides the above two seems difficult. for example,
> >>>>
> >>>> while we hit a large folio in swapcache but if we can't entirely map it
> >>>> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> >>>> == nr_pages == 1
> >>>
> >>> No, there would be other references from the swapcache and
> >>> folio_ref_count(folio) > 1. See my other reply.
> >>
> >> right. can be clearer by:
> >
> > Wait, do we still need folio_nr_pages(folio) == nr_pages even if we use
> > folio_ref_count(folio) == 1 and moving folio_ref_add(folio, nr_pages - 1)?
>
> I don't think that we will "need" it.
>
> >
> > one case is that we have a large folio with 16 PTEs, and we unmap
> > 15 swap PTE entries, thus we have only one swap entry left. Then
> > we hit the large folio in swapcache. but we have only one PTE thus we will
> > map only one PTE. lacking folio_nr_pages(folio) == nr_pages, we reuse the
> > large folio for one PTE. with it, do_wp_page() will migrate the large
> > folio to a small one?
>
> We will set PAE bit and do_wp_page() will unconditionally reuse that page.
>
> Note that this is the same as if we had pte_swp_exclusive() set and
> would have run into "exclusive=true" here.
>
> If we'd want a similar "optimization" as we have in
> wp_can_reuse_anon_folio(), you'd want something like
>
> exclusive || (folio_ref_count(folio) == 1 &&
> (!folio_test_large(folio) || nr_pages > 1)
I feel like
A : !folio_test_large(folio) || nr_pages > 1
equals
B: folio_nr_pages(folio) == nr_pages
if folio is small, folio_test_large(folio) is false, both A and B will be true;
if folio is large, and we map the whole large folio, A will be true
because of nr_pages > 1;
B is also true;
if folio is large, and we map single one PTE, A will be false;
B is also false, because nr_pages == 1 but folio_nr_pages(folio) > 1;
right?
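Spelling the three cases out (purely illustrative helpers, not something I'd
actually add; this relies on nr_pages being either 1 or folio_nr_pages(folio),
which is what this patch does):

/* illustrative only: the two conditions being compared */
static inline bool cond_a(struct folio *folio, int nr_pages)
{
	return !folio_test_large(folio) || nr_pages > 1;
}

static inline bool cond_b(struct folio *folio, int nr_pages)
{
	return folio_nr_pages(folio) == nr_pages;
}

/*
 * small folio, nr_pages == 1:              cond_a true,  cond_b true
 * large folio, whole folio batch-mapped:   cond_a true,  cond_b true
 * large folio, fallback to nr_pages == 1:  cond_a false, cond_b false
 */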
However, I agree that delving into this complexity might not be necessary
at the moment.
>
> ... but I am not sure if that is really worth the complexity here.
>
> >
> > 1AM, tired and sleepy. not quite sure I am correct.
> > I look forward to seeing your reply tomorrow morning :-)
>
> Heh, no need to dream about this ;)
>
> --
> Cheers,
>
> David / dhildenb
Thanks
Barry
On 07.05.24 00:58, Barry Song wrote:
> On Tue, May 7, 2024 at 1:16 AM David Hildenbrand <david@redhat.com> wrote:
>>
>> On 06.05.24 14:58, Barry Song wrote:
>>> On Tue, May 7, 2024 at 12:38 AM Barry Song <21cnbao@gmail.com> wrote:
>>>>
>>>> On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
>>>>>
>>>>> On 04.05.24 01:23, Barry Song wrote:
>>>>>> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>>>>>>>
>>>>>>> On 03/05/2024 01:50, Barry Song wrote:
>>>>>>>> From: Chuanhua Han <hanchuanhua@oppo.com>
>>>>>>>>
>>>>>>>> When a large folio is found in the swapcache, the current implementation
>>>>>>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
>>>>>>>> page faults. This patch opts to map the entire large folio at once to
>>>>>>>> minimize page faults. Additionally, redundant checks and early exits
>>>>>>>> for ARM64 MTE restoring are removed.
>>>>>>>>
>>>>>>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
>>>>>>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
>>>>>>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
>>>>>>>
>>>>>>> With the suggested changes below:
>>>>>>>
>>>>>>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>>>>>>
>>>>>>>> ---
>>>>>>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
>>>>>>>> 1 file changed, 48 insertions(+), 12 deletions(-)
>>>>>>>>
>>>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>>>> index 22e7c33cc747..940fdbe69fa1 100644
>>>>>>>> --- a/mm/memory.c
>>>>>>>> +++ b/mm/memory.c
>>>>>>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>>>> pte_t pte;
>>>>>>>> vm_fault_t ret = 0;
>>>>>>>> void *shadow = NULL;
>>>>>>>> + int nr_pages = 1;
>>>>>>>> + unsigned long page_idx = 0;
>>>>>>>> + unsigned long address = vmf->address;
>>>>>>>> + pte_t *ptep;
>>>>>>>
>>>>>>> nit: Personally I'd prefer all these to get initialised just before the "if
>>>>>>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
>>>>>>> any logic between here and there makes an adjustment) and its clear that they
>>>>>>> are only to be used after that block (the compiler will warn if using an
>>>>>>> uninitialized value).
>>>>>>
>>>>>> right. I agree this will make the code more readable.
>>>>>>
>>>>>>>
>>>>>>>>
>>>>>>>> if (!pte_unmap_same(vmf))
>>>>>>>> goto out;
>>>>>>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>>>> goto out_nomap;
>>>>>>>> }
>>>>>>>>
>>>>>>>> + ptep = vmf->pte;
>>>>>>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
>>>>>>>> + int nr = folio_nr_pages(folio);
>>>>>>>> + unsigned long idx = folio_page_idx(folio, page);
>>>>>>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
>>>>>>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
>>>>>>>> + pte_t *folio_ptep;
>>>>>>>> + pte_t folio_pte;
>>>>>>>> +
>>>>>>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
>>>>>>>> + goto check_folio;
>>>>>>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
>>>>>>>> + goto check_folio;
>>>>>>>> +
>>>>>>>> + folio_ptep = vmf->pte - idx;
>>>>>>>> + folio_pte = ptep_get(folio_ptep);
>>>>>>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
>>>>>>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
>>>>>>>> + goto check_folio;
>>>>>>>> +
>>>>>>>> + page_idx = idx;
>>>>>>>> + address = folio_start;
>>>>>>>> + ptep = folio_ptep;
>>>>>>>> + nr_pages = nr;
>>>>>>>> + entry = folio->swap;
>>>>>>>> + page = &folio->page;
>>>>>>>> + }
>>>>>>>> +
>>>>>>>> +check_folio:
>>>>>>>
>>>>>>> Is this still the correct label name, given the checks are now above the new
>>>>>>> block? Perhaps "one_page" or something like that?
>>>>>>
>>>>>> not quite sure about this, as the code after one_page can be multiple_pages.
>>>>>> On the other hand, it seems we are really checking folio after "check_folio"
>>>>>> :-)
>>>>>>
>>>>>>
>>>>>> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
>>>>>> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
>>>>>>
>>>>>> /*
>>>>>> * Check under PT lock (to protect against concurrent fork() sharing
>>>>>> * the swap entry concurrently) for certainly exclusive pages.
>>>>>> */
>>>>>> if (!folio_test_ksm(folio)) {
>>>>>>
>>>>>>
>>>>>>>
>>>>>>>> +
>>>>>>>> /*
>>>>>>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>>>>>>>> * must never point at an anonymous page in the swapcache that is
>>>>>>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>>>> * We're already holding a reference on the page but haven't mapped it
>>>>>>>> * yet.
>>>>>>>> */
>>>>>>>> - swap_free_nr(entry, 1);
>>>>>>>> + swap_free_nr(entry, nr_pages);
>>>>>>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
>>>>>>>> folio_free_swap(folio);
>>>>>>>>
>>>>>>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
>>>>>>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
>>>>>>>> + folio_ref_add(folio, nr_pages - 1);
>>>>>>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
>>>>>>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>>>>>>>> pte = mk_pte(page, vma->vm_page_prot);
>>>>>>>>
>>>>>>>> /*
>>>>>>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>>>>>>>> * exclusivity.
>>>>>>>> */
>>>>>>>> if (!folio_test_ksm(folio) &&
>>>>>>>> - (exclusive || folio_ref_count(folio) == 1)) {
>>>>>>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
>>>>>>>> + folio_nr_pages(folio) == nr_pages))) {
>>>>>>>
>>>>>>> I think in practice there is no change here? If nr_pages > 1 then the folio is
>>>>>>> in the swapcache, so there is an extra ref on it? I agree with the change for
>>>>>>> robustness sake. Just checking my understanding.
>>>>>>
>>>>>> This is the code showing we are reusing/(mkwrite) a folio either
>>>>>> 1. we meet a small folio and we are the only one hitting the small folio
>>>>>> 2. we meet a large folio and we are the only one hitting the large folio
>>>>>>
>>>>>> any corner cases besides the above two seems difficult. for example,
>>>>>>
>>>>>> while we hit a large folio in swapcache but if we can't entirely map it
>>>>>> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
>>>>>> == nr_pages == 1
>>>>>
>>>>> No, there would be other references from the swapcache and
>>>>> folio_ref_count(folio) > 1. See my other reply.
>>>>
>>>> right. can be clearer by:
>>>
>>> Wait, do we still need folio_nr_pages(folio) == nr_pages even if we use
>>> folio_ref_count(folio) == 1 and moving folio_ref_add(folio, nr_pages - 1)?
>>
>> I don't think that we will "need" it.
>>
>>>
>>> one case is that we have a large folio with 16 PTEs, and we unmap
>>> 15 swap PTE entries, thus we have only one swap entry left. Then
>>> we hit the large folio in swapcache. but we have only one PTE thus we will
>>> map only one PTE. lacking folio_nr_pages(folio) == nr_pages, we reuse the
>>> large folio for one PTE. with it, do_wp_page() will migrate the large
>>> folio to a small one?
>>
>> We will set PAE bit and do_wp_page() will unconditionally reuse that page.
>>
>> Note that this is the same as if we had pte_swp_exclusive() set and
>> would have run into "exclusive=true" here.
>>
>> If we'd want a similar "optimization" as we have in
>> wp_can_reuse_anon_folio(), you'd want something like
>>
>> exclusive || (folio_ref_count(folio) == 1 &&
>> (!folio_test_large(folio) || nr_pages > 1)
>
> I feel like
>
> A : !folio_test_large(folio) || nr_pages > 1
>
> equals
>
> B: folio_nr_pages(folio) == nr_pages
>
> if folio is small, folio_test_large(folio) is false, both A and B will be true;
> if folio is large, and we map the whole large folio, A will be true
> because of nr_pages > 1;
> B is also true;
> if folio is large, and we map single one PTE, A will be false;
> B is also false, because nr_pages == 1 but folio_nr_pages(folio) > 1;
>
> right?
Let's assume a single subpage of a large folio is no longer mapped.
Then, we'd have:
nr_pages == folio_nr_pages(folio) - 1.
You could simply map+reuse most of the folio without COWing.
Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
just easy to detect that the folio is exclusive (folio_ref_count(folio)
== 1 before mapping anything).
If you really want to mimic what do_wp_page() currently does, you should
have:
exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
Personally, I think we should keep it simple here and use:
exclusive || folio_ref_count(folio) == 1
IMHO, that's as clear as it gets.
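In the code that would be roughly (sketch only, untested, and it assumes the
reference check is done before folio_ref_add(folio, nr_pages - 1) takes the
extra references for the batch):

	if (!folio_test_ksm(folio) &&
	    (exclusive || folio_ref_count(folio) == 1)) {
		/* ... maybe_mkwrite() handling as today ... */
		rmap_flags |= RMAP_EXCLUSIVE;
	}
	/* only now take the extra references for the rest of the batch */
	folio_ref_add(folio, nr_pages - 1);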
--
Cheers,
David / dhildenb
On Tue, May 7, 2024 at 8:24 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 07.05.24 00:58, Barry Song wrote:
> > On Tue, May 7, 2024 at 1:16 AM David Hildenbrand <david@redhat.com> wrote:
> >>
> >> On 06.05.24 14:58, Barry Song wrote:
> >>> On Tue, May 7, 2024 at 12:38 AM Barry Song <21cnbao@gmail.com> wrote:
> >>>>
> >>>> On Tue, May 7, 2024 at 12:07 AM David Hildenbrand <david@redhat.com> wrote:
> >>>>>
> >>>>> On 04.05.24 01:23, Barry Song wrote:
> >>>>>> On Fri, May 3, 2024 at 6:50 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
> >>>>>>>
> >>>>>>> On 03/05/2024 01:50, Barry Song wrote:
> >>>>>>>> From: Chuanhua Han <hanchuanhua@oppo.com>
> >>>>>>>>
> >>>>>>>> When a large folio is found in the swapcache, the current implementation
> >>>>>>>> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> >>>>>>>> page faults. This patch opts to map the entire large folio at once to
> >>>>>>>> minimize page faults. Additionally, redundant checks and early exits
> >>>>>>>> for ARM64 MTE restoring are removed.
> >>>>>>>>
> >>>>>>>> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> >>>>>>>> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> >>>>>>>> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> >>>>>>>
> >>>>>>> With the suggested changes below:
> >>>>>>>
> >>>>>>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> >>>>>>>
> >>>>>>>> ---
> >>>>>>>> mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
> >>>>>>>> 1 file changed, 48 insertions(+), 12 deletions(-)
> >>>>>>>>
> >>>>>>>> diff --git a/mm/memory.c b/mm/memory.c
> >>>>>>>> index 22e7c33cc747..940fdbe69fa1 100644
> >>>>>>>> --- a/mm/memory.c
> >>>>>>>> +++ b/mm/memory.c
> >>>>>>>> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>>>> pte_t pte;
> >>>>>>>> vm_fault_t ret = 0;
> >>>>>>>> void *shadow = NULL;
> >>>>>>>> + int nr_pages = 1;
> >>>>>>>> + unsigned long page_idx = 0;
> >>>>>>>> + unsigned long address = vmf->address;
> >>>>>>>> + pte_t *ptep;
> >>>>>>>
> >>>>>>> nit: Personally I'd prefer all these to get initialised just before the "if
> >>>>>>> (folio_test_large()..." block below. That way it is clear they are fresh (incase
> >>>>>>> any logic between here and there makes an adjustment) and its clear that they
> >>>>>>> are only to be used after that block (the compiler will warn if using an
> >>>>>>> uninitialized value).
> >>>>>>
> >>>>>> right. I agree this will make the code more readable.
> >>>>>>
> >>>>>>>
> >>>>>>>>
> >>>>>>>> if (!pte_unmap_same(vmf))
> >>>>>>>> goto out;
> >>>>>>>> @@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>>>> goto out_nomap;
> >>>>>>>> }
> >>>>>>>>
> >>>>>>>> + ptep = vmf->pte;
> >>>>>>>> + if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> >>>>>>>> + int nr = folio_nr_pages(folio);
> >>>>>>>> + unsigned long idx = folio_page_idx(folio, page);
> >>>>>>>> + unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
> >>>>>>>> + unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> >>>>>>>> + pte_t *folio_ptep;
> >>>>>>>> + pte_t folio_pte;
> >>>>>>>> +
> >>>>>>>> + if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> >>>>>>>> + goto check_folio;
> >>>>>>>> + if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> >>>>>>>> + goto check_folio;
> >>>>>>>> +
> >>>>>>>> + folio_ptep = vmf->pte - idx;
> >>>>>>>> + folio_pte = ptep_get(folio_ptep);
> >>>>>>>> + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> >>>>>>>> + swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> >>>>>>>> + goto check_folio;
> >>>>>>>> +
> >>>>>>>> + page_idx = idx;
> >>>>>>>> + address = folio_start;
> >>>>>>>> + ptep = folio_ptep;
> >>>>>>>> + nr_pages = nr;
> >>>>>>>> + entry = folio->swap;
> >>>>>>>> + page = &folio->page;
> >>>>>>>> + }
> >>>>>>>> +
> >>>>>>>> +check_folio:
> >>>>>>>
> >>>>>>> Is this still the correct label name, given the checks are now above the new
> >>>>>>> block? Perhaps "one_page" or something like that?
> >>>>>>
> >>>>>> not quite sure about this, as the code after one_page can be multiple_pages.
> >>>>>> On the other hand, it seems we are really checking folio after "check_folio"
> >>>>>> :-)
> >>>>>>
> >>>>>>
> >>>>>> BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
> >>>>>> BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
> >>>>>>
> >>>>>> /*
> >>>>>> * Check under PT lock (to protect against concurrent fork() sharing
> >>>>>> * the swap entry concurrently) for certainly exclusive pages.
> >>>>>> */
> >>>>>> if (!folio_test_ksm(folio)) {
> >>>>>>
> >>>>>>
> >>>>>>>
> >>>>>>>> +
> >>>>>>>> /*
> >>>>>>>> * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
> >>>>>>>> * must never point at an anonymous page in the swapcache that is
> >>>>>>>> @@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>>>> * We're already holding a reference on the page but haven't mapped it
> >>>>>>>> * yet.
> >>>>>>>> */
> >>>>>>>> - swap_free_nr(entry, 1);
> >>>>>>>> + swap_free_nr(entry, nr_pages);
> >>>>>>>> if (should_try_to_free_swap(folio, vma, vmf->flags))
> >>>>>>>> folio_free_swap(folio);
> >>>>>>>>
> >>>>>>>> - inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> >>>>>>>> - dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> >>>>>>>> + folio_ref_add(folio, nr_pages - 1);
> >>>>>>>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> >>>>>>>> + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> >>>>>>>> pte = mk_pte(page, vma->vm_page_prot);
> >>>>>>>>
> >>>>>>>> /*
> >>>>>>>> @@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >>>>>>>> * exclusivity.
> >>>>>>>> */
> >>>>>>>> if (!folio_test_ksm(folio) &&
> >>>>>>>> - (exclusive || folio_ref_count(folio) == 1)) {
> >>>>>>>> + (exclusive || (folio_ref_count(folio) == nr_pages &&
> >>>>>>>> + folio_nr_pages(folio) == nr_pages))) {
> >>>>>>>
> >>>>>>> I think in practice there is no change here? If nr_pages > 1 then the folio is
> >>>>>>> in the swapcache, so there is an extra ref on it? I agree with the change for
> >>>>>>> robustness sake. Just checking my understanding.
> >>>>>>
> >>>>>> This is the code showing we are reusing/(mkwrite) a folio either
> >>>>>> 1. we meet a small folio and we are the only one hitting the small folio
> >>>>>> 2. we meet a large folio and we are the only one hitting the large folio
> >>>>>>
> >>>>>> any corner cases besides the above two seems difficult. for example,
> >>>>>>
> >>>>>> while we hit a large folio in swapcache but if we can't entirely map it
> >>>>>> (nr_pages==1) due to partial unmap, we will have folio_ref_count(folio)
> >>>>>> == nr_pages == 1
> >>>>>
> >>>>> No, there would be other references from the swapcache and
> >>>>> folio_ref_count(folio) > 1. See my other reply.
> >>>>
> >>>> right. can be clearer by:
> >>>
> >>> Wait, do we still need folio_nr_pages(folio) == nr_pages even if we use
> >>> folio_ref_count(folio) == 1 and moving folio_ref_add(folio, nr_pages - 1)?
> >>
> >> I don't think that we will "need" it.
> >>
> >>>
> >>> one case is that we have a large folio with 16 PTEs, and we unmap
> >>> 15 swap PTE entries, thus we have only one swap entry left. Then
> >>> we hit the large folio in swapcache. but we have only one PTE thus we will
> >>> map only one PTE. lacking folio_nr_pages(folio) == nr_pages, we reuse the
> >>> large folio for one PTE. with it, do_wp_page() will migrate the large
> >>> folio to a small one?
> >>
> >> We will set PAE bit and do_wp_page() will unconditionally reuse that page.
> >>
> >> Note that this is the same as if we had pte_swp_exclusive() set and
> >> would have run into "exclusive=true" here.
> >>
> >> If we'd want a similar "optimization" as we have in
> >> wp_can_reuse_anon_folio(), you'd want something like
> >>
> >> exclusive || (folio_ref_count(folio) == 1 &&
> >> (!folio_test_large(folio) || nr_pages > 1)
> >
> > I feel like
> >
> > A : !folio_test_large(folio) || nr_pages > 1
> >
> > equals
> >
> > B: folio_nr_pages(folio) == nr_pages
> >
> > if folio is small, folio_test_large(folio) is false, both A and B will be true;
> > if folio is large, and we map the whole large folio, A will be true
> > because of nr_pages > 1;
> > B is also true;
> > if folio is large, and we map single one PTE, A will be false;
> > B is also false, because nr_pages == 1 but folio_nr_pages(folio) > 1;
> >
> > right?
>
> Let's assume a single subpage of a large folio is no longer mapped.
> Then, we'd have:
>
> nr_pages == folio_nr_pages(folio) - 1.
>
> You could simply map+reuse most of the folio without COWing.
yes. This is good but the pte which is no longer mapped could be
anyone within the nr_pages PTEs. so it could be quite tricky for
set_ptes.
>
> Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
> just easy to detect that the folio is exclusive (folio_ref_count(folio)
> == 1 before mapping anything).
>
> If you really want to mimic what do_wp_page() currently does, you should
> have:
>
> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
I actually dislike the part that do_wp_page() handles the reuse of a large
folio which is entirely mapped. For example, A forks B, B exit, we write
A's large folio, we get nr_pages CoW of small folios. Ideally, we can
reuse the whole folios for writing.
>
> Personally, I think we should keep it simple here and use:
>
> exclusive || folio_ref_count(folio) == 1
I feel this is still better than
exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
as we reuse the whole large folio. the do_wp_page() behaviour
doesn't have this.
>
> IMHO, that's as clear as it gets.
I agree this is clear. But I wonder if there is a possibility to optimize.
Using your example,
"Let's assume a single subpage of a large folio is no longer mapped."
For a large folio with 16 PTEs, suppose we have unmapped one of them.
Thus, we have 15 swap entries left.
The first PTE which gets a page fault will reuse the whole large folio with
"exclusive || folio_ref_count(folio) == 1" only. The remaining 14 will
allocate 14 small folios (the swapcache has been dropped), thus we use
16 + 14 = 30 pages of memory.
With either
A: !folio_test_large(folio) || nr_pages > 1
or
B: folio_nr_pages(folio) == nr_pages
we consume 15 pages.
>
> --
> Cheers,
>
> David / dhildenb
Thanks
Barry
>> Let's assume a single subpage of a large folio is no longer mapped.
>> Then, we'd have:
>>
>> nr_pages == folio_nr_pages(folio) - 1.
>>
>> You could simply map+reuse most of the folio without COWing.
>
> yes. This is good but the pte which is no longer mapped could be
> anyone within the nr_pages PTEs. so it could be quite tricky for
> set_ptes.

The swap batching logic should take care of that, otherwise it would be
buggy.

>
>>
>> Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
>> just easy to detect that the folio is exclusive (folio_ref_count(folio)
>> == 1 before mapping anything).
>>
>> If you really want to mimic what do_wp_page() currently does, you should
>> have:
>>
>> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
>
> I actually dislike the part that do_wp_page() handles the reuse of a large
> folio which is entirely mapped. For example, A forks B, B exit, we write
> A's large folio, we get nr_pages CoW of small folios. Ideally, we can
> reuse the whole folios for writing.

Yes, see the link I shared to what I am working on. There isn't really a
question if what we do right now needs to be improved and all these
scenarios are pretty obvious clear.

>
>>
>> Personally, I think we should keep it simple here and use:
>>
>> exclusive || folio_ref_count(folio) == 1
>
> I feel this is still better than
> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
> as we reuse the whole large folio. the do_wp_page() behaviour
> doesn't have this.

Yes, but there is the comment "Same logic as in do_wp_page();". We
already ran into issues having different COW reuse logic all over the
place. For this case here, I don't care if we leave it as

"exclusive || folio_ref_count(folio) == 1"

But let's not try inventing new stuff here.

--
Cheers,

David / dhildenb
On Tue, May 7, 2024 at 8:59 PM David Hildenbrand <david@redhat.com> wrote:
>
> >> Let's assume a single subpage of a large folio is no longer mapped.
> >> Then, we'd have:
> >>
> >> nr_pages == folio_nr_pages(folio) - 1.
> >>
> >> You could simply map+reuse most of the folio without COWing.
> >
> > yes. This is good but the pte which is no longer mapped could be
> > anyone within the nr_pages PTEs. so it could be quite tricky for
> > set_ptes.
>
> The swap batching logic should take care of that, otherwise it would be
> buggy.

When you mention "it would be buggy," are you also referring to the current
fallback approach? or only refer to the future patch which might be able
to map/reuse "nr_pages - 1" pages?

The current patch falls back to setting nr_pages = 1 without mapping or
reusing nr_pages - 1. I feel your concern doesn't refer to this fallback?

>
> >
> >>
> >> Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
> >> just easy to detect that the folio is exclusive (folio_ref_count(folio)
> >> == 1 before mapping anything).
> >>
> >> If you really want to mimic what do_wp_page() currently does, you should
> >> have:
> >>
> >> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
> >
> > I actually dislike the part that do_wp_page() handles the reuse of a large
> > folio which is entirely mapped. For example, A forks B, B exit, we write
> > A's large folio, we get nr_pages CoW of small folios. Ideally, we can
> > reuse the whole folios for writing.
>
> Yes, see the link I shared to what I am working on. There isn't really a
> question if what we do right now needs to be improved and all these
> scenarios are pretty obvious clear.

Great! I plan to dedicate more time to reviewing your work.

>
> >
> >>
> >> Personally, I think we should keep it simple here and use:
> >>
> >> exclusive || folio_ref_count(folio) == 1
> >
> > I feel this is still better than
> > exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
> > as we reuse the whole large folio. the do_wp_page() behaviour
> > doesn't have this.
>
> Yes, but there is the comment "Same logic as in do_wp_page();". We
> already ran into issues having different COW reuse logic all over the
> place. For this case here, I don't care if we leave it as
>
> "exclusive || folio_ref_count(folio) == 1"

I'm perfectly fine with using the code for this patchset and maybe
looking for other
opportunities for potential optimization as an incremental patchset,
for example,
reusing the remaining PTEs as suggested by you - "simply map+reuse most of
the folio without COWing."

>
> But let's not try inventing new stuff here.

It seems you ignored and snipped my "16 + 14" pages and "15" pages
example though. but once we support "simply map+reuse most of the
folio without COWing", the "16+14" problem can be resolved, instead,
we consume 16 pages.

>
> --
> Cheers,
>
> David / dhildenb

Thanks
Barry
On 07.05.24 11:24, Barry Song wrote:
> On Tue, May 7, 2024 at 8:59 PM David Hildenbrand <david@redhat.com> wrote:
>>
>>>> Let's assume a single subpage of a large folio is no longer mapped.
>>>> Then, we'd have:
>>>>
>>>> nr_pages == folio_nr_pages(folio) - 1.
>>>>
>>>> You could simply map+reuse most of the folio without COWing.
>>>
>>> yes. This is good but the pte which is no longer mapped could be
>>> anyone within the nr_pages PTEs. so it could be quite tricky for
>>> set_ptes.
>>
>> The swap batching logic should take care of that, otherwise it would be
>> buggy.
>
> When you mention "it would be buggy," are you also referring to the current
> fallback approach? or only refer to the future patch which might be able
> to map/reuse "nr_pages - 1" pages?

swap_pte_batch() should not skip any holes. So consequently, set_ptes()
should do the right thing. (regarding your comment "could be quite ricky
for set_ptes")

So I think that should be working as expected.

>
> The current patch falls back to setting nr_pages = 1 without mapping or
> reusing nr_pages - 1. I feel your concern doesn't refer to this fallback?
>
>>
>>>
>>>>
>>>> Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
>>>> just easy to detect that the folio is exclusive (folio_ref_count(folio)
>>>> == 1 before mapping anything).
>>>>
>>>> If you really want to mimic what do_wp_page() currently does, you should
>>>> have:
>>>>
>>>> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
>>>
>>> I actually dislike the part that do_wp_page() handles the reuse of a large
>>> folio which is entirely mapped. For example, A forks B, B exit, we write
>>> A's large folio, we get nr_pages CoW of small folios. Ideally, we can
>>> reuse the whole folios for writing.
>>
>> Yes, see the link I shared to what I am working on. There isn't really a
>> question if what we do right now needs to be improved and all these
>> scenarios are pretty obvious clear.
>
> Great! I plan to dedicate more time to reviewing your work.

Nice! And there will be a lot of follow-up optimization work I won't
tackle immediately regarding COW (COW-reuse around, maybe sometimes we
want to COW bigger chunks).

I still have making PageAnonExclusive a per-folio flag on my TODO list,
that will help the COW-reuse around case a lot.

>
>>
>>>
>>>>
>>>> Personally, I think we should keep it simple here and use:
>>>>
>>>> exclusive || folio_ref_count(folio) == 1
>>>
>>> I feel this is still better than
>>> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
>>> as we reuse the whole large folio. the do_wp_page() behaviour
>>> doesn't have this.
>>
>> Yes, but there is the comment "Same logic as in do_wp_page();". We
>> already ran into issues having different COW reuse logic all over the
>> place. For this case here, I don't care if we leave it as
>>
>> "exclusive || folio_ref_count(folio) == 1"
>
> I'm perfectly fine with using the code for this patchset and maybe
> looking for other
> opportunities for potential optimization as an incremental patchset,
> for example,
> reusing the remaining PTEs as suggested by you - "simply map+reuse most of
> the folio without COWing."
>
>>
>> But let's not try inventing new stuff here.
>
> It seems you ignored and snipped my "16 + 14" pages and "15" pages
> example though. but once we support "simply map+reuse most of the
> folio without COWing", the "16+14" problem can be resolved, instead,
> we consume 16 pages.


Oh, sorry for skipping that, for me it was rather clear: the partially
mapped folios will be on the deferred split list and the excess memory
can (and will be) reclaimed when there is need. So this temporary memory
consumption is usually not a problem in practice. But yes, something to
optimize (just like COW reuse in general).

--
Cheers,

David / dhildenb
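(For reference, the batched mapping that the swap_pte_batch()/set_ptes()
discussion above is about boils down to something like the sketch below.
This is not the literal patch hunk, just its shape, assuming the folio-wide
batch check succeeded and pte/rmap_flags were set up as in the diff.)

	/*
	 * Sketch: swap_pte_batch() only counts a contiguous run of swap PTEs
	 * belonging to the folio, so the nr_pages batch has no holes and can
	 * be mapped in one go.
	 */
	folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, rmap_flags);
	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);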
On Tue, May 7, 2024 at 6:39 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 07.05.24 11:24, Barry Song wrote:
> > On Tue, May 7, 2024 at 8:59 PM David Hildenbrand <david@redhat.com> wrote:
> >>
> >>>> Let's assume a single subpage of a large folio is no longer mapped.
> >>>> Then, we'd have:
> >>>>
> >>>> nr_pages == folio_nr_pages(folio) - 1.
> >>>>
> >>>> You could simply map+reuse most of the folio without COWing.
> >>>
> >>> yes. This is good but the pte which is no longer mapped could be
> >>> anyone within the nr_pages PTEs. so it could be quite tricky for
> >>> set_ptes.
> >>
> >> The swap batching logic should take care of that, otherwise it would be
> >> buggy.
> >
> > When you mention "it would be buggy," are you also referring to the current
> > fallback approach? or only refer to the future patch which might be able
> > to map/reuse "nr_pages - 1" pages?
>
> swap_pte_batch() should not skip any holes. So consequently, set_ptes()
> should do the right thing. (regarding your comment "could be quite ricky
> for set_ptes")
>
> So I think that should be working as expected.
Maybe not. Take a look at my current code: I goto check_folio with
nr_pages = 1 if swap_pte_batch(folio_ptep, nr, folio_pte) != folio_nr_pages(folio).

+	nr_pages = 1;
+	...
+	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
+		int nr = folio_nr_pages(folio);
+		...
+		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+			goto check_folio;	/* read here, I am falling back to nr_pages = 1 */
+
+		...
+		nr_pages = nr;

The fallback (=1) works. But it seems you are proposing to set

	nr_pages = swap_pte_batch(folio_ptep, nr, folio_pte)

if (swap_pte_batch(folio_ptep, nr, folio_pte) > 1 &&
    swap_pte_batch(folio_ptep, nr, folio_pte) < nr_pages) ?
>
> >
> > The current patch falls back to setting nr_pages = 1 without mapping or
> > reusing nr_pages - 1. I feel your concern doesn't refer to this fallback?
> >
> >>
> >>>
> >>>>
> >>>> Once we support COW reuse of PTE-mapped THP we'd do the same. Here, it's
> >>>> just easy to detect that the folio is exclusive (folio_ref_count(folio)
> >>>> == 1 before mapping anything).
> >>>>
> >>>> If you really want to mimic what do_wp_page() currently does, you should
> >>>> have:
> >>>>
> >>>> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
> >>>
> >>> I actually dislike the part that do_wp_page() handles the reuse of a large
> >>> folio which is entirely mapped. For example, A forks B, B exit, we write
> >>> A's large folio, we get nr_pages CoW of small folios. Ideally, we can
> >>> reuse the whole folios for writing.
> >>
> >> Yes, see the link I shared to what I am working on. There isn't really a
> >> question if what we do right now needs to be improved and all these
> >> scenarios are pretty obvious clear.
> >
> > Great! I plan to dedicate more time to reviewing your work.
>
> Nice! And there will be a lot of follow-up optimization work I won't
> tackle immediately regarding COW (COW-reuse around, maybe sometimes we
> want to COW bigger chunks).
>
> I still have making PageAnonExclusive a per-folio flag on my TODO list,
> that will help the COW-reuse around case a lot.
>
> >
> >>
> >>>
> >>>>
> >>>> Personally, I think we should keep it simple here and use:
> >>>>
> >>>> exclusive || folio_ref_count(folio) == 1
> >>>
> >>> I feel this is still better than
> >>> exclusive || (folio_ref_count(folio) == 1 && !folio_test_large(folio))
> >>> as we reuse the whole large folio. the do_wp_page() behaviour
> >>> doesn't have this.
> >>
> >> Yes, but there is the comment "Same logic as in do_wp_page();". We
> >> already ran into issues having different COW reuse logic all over the
> >> place. For this case here, I don't care if we leave it as
> >>
> >> "exclusive || folio_ref_count(folio) == 1"
> >
> > I'm perfectly fine with using the code for this patchset and maybe
> > looking for other
> > opportunities for potential optimization as an incremental patchset,
> > for example,
> > reusing the remaining PTEs as suggested by you - "simply map+reuse most of
> > the folio without COWing."
> >
> >>
> >> But let's not try inventing new stuff here.
> >
> > It seems you ignored and snipped my "16 + 14" pages and "15" pages
> > example though. but once we support "simply map+reuse most of the
> > folio without COWing", the "16+14" problem can be resolved, instead,
> > we consume 16 pages.
>
>
> Oh, sorry for skipping that, for me it was rather clear: the partially
> mapped folios will be on the deferred split list and the excess memory
> can (and will be) reclaimed when there is need. So this temporary memory
> consumption is usually not a problem in practice. But yes, something to
> optimize (just like COW reuse in general).
>
> --
> Cheers,
>
> David / dhildenb
>