From: Kiryl Shutsemau <kas@kernel.org>
Currently, try_to_unmap_one() only tries to mlock small folios.

Use logic similar to folio_referenced_one() to mlock large folios as
well: only do it for fully mapped folios, and only under a page table
lock that protects all of the folio's page table entries.
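In condensed form, the new flow inside the VM_LOCKED branch of
try_to_unmap_one() looks roughly like this (a sketch of the diff
below, not additional code to apply):

	ptes++;
	/* An mlocked folio cannot be unmapped */
	ret = false;

	/* Keep walking until every PTE of the folio has been seen */
	if (pvmw.pte && ptes != pvmw.nr_pages)
		continue;

	/* A single ptl must cover all PTEs of the folio */
	if (pvmw.flags & PVMW_PGTABLE_CROSSED)
		goto walk_done;

	mlock_vma_folio(folio, vma);
	goto walk_done;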
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
---
mm/rmap.c | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 3d0235f332de..a55c3bf41287 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1870,6 +1870,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long nr_pages = 1, end_addr;
unsigned long pfn;
unsigned long hsz = 0;
+ int ptes = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
@@ -1910,10 +1911,34 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
if (!(flags & TTU_IGNORE_MLOCK) &&
(vma->vm_flags & VM_LOCKED)) {
+ ptes++;
+
+ /*
+ * Set 'ret' to indicate the page cannot be unmapped.
+ *
+ * Do not jump to walk_abort immediately as additional
+ * iterations might be required to detect a fully mapped
+ * folio and mlock it.
+ */
+ ret = false;
+
+ /* Only mlock fully mapped pages */
+ if (pvmw.pte && ptes != pvmw.nr_pages)
+ continue;
+
+ /*
+ * All PTEs must be protected by the page table lock in
+ * order to mlock the folio.
+ *
+ * If a page table boundary has been crossed, the current
+ * ptl only protects part of the PTEs.
+ */
+ if (pvmw.flags & PVMW_PGTABLE_CROSSED)
+ goto walk_done;
+
/* Restore the mlock which got missed */
- if (!folio_test_large(folio))
- mlock_vma_folio(folio, vma);
- goto walk_abort;
+ mlock_vma_folio(folio, vma);
+ goto walk_done;
}
if (!pvmw.pte) {
--
2.50.1