[PATCH v2 1/2] mm/mprotect: move softleaf code out of the main function

Pedro Falcato posted 2 patches 1 week, 2 days ago
There is a newer version of this series
[PATCH v2 1/2] mm/mprotect: move softleaf code out of the main function
Posted by Pedro Falcato 1 week, 2 days ago
Move the softleaf-handling code in change_pte_range() into a separate
function, change_softleaf_pte(). This makes change_pte_range() a good bit
smaller, and lessens the cognitive load when reading through the function.

Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Signed-off-by: Pedro Falcato <pfalcato@suse.de>
---
 mm/mprotect.c | 129 +++++++++++++++++++++++++++-----------------------
 1 file changed, 69 insertions(+), 60 deletions(-)

diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9681f055b9fc..2eaf862e5734 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,74 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
 	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
 }
 
+static long change_softleaf_pte(struct vm_area_struct *vma,
+	unsigned long addr, pte_t *pte, pte_t oldpte, unsigned long cp_flags)
+{
+	const bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+	const bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+	softleaf_t entry = softleaf_from_pte(oldpte);
+	pte_t newpte;
+
+	if (softleaf_is_migration_write(entry)) {
+		const struct folio *folio = softleaf_to_folio(entry);
+
+		/*
+		 * A protection check is difficult so
+		 * just be safe and disable write
+		 */
+		if (folio_test_anon(folio))
+			entry = make_readable_exclusive_migration_entry(
+					     swp_offset(entry));
+		else
+			entry = make_readable_migration_entry(swp_offset(entry));
+		newpte = swp_entry_to_pte(entry);
+		if (pte_swp_soft_dirty(oldpte))
+			newpte = pte_swp_mksoft_dirty(newpte);
+	} else if (softleaf_is_device_private_write(entry)) {
+		/*
+		 * We do not preserve soft-dirtiness. See
+		 * copy_nonpresent_pte() for explanation.
+		 */
+		entry = make_readable_device_private_entry(
+					swp_offset(entry));
+		newpte = swp_entry_to_pte(entry);
+		if (pte_swp_uffd_wp(oldpte))
+			newpte = pte_swp_mkuffd_wp(newpte);
+	} else if (softleaf_is_marker(entry)) {
+		/*
+		 * Ignore error swap entries unconditionally,
+		 * because any access should sigbus/sigsegv
+		 * anyway.
+		 */
+		if (softleaf_is_poison_marker(entry) ||
+		    softleaf_is_guard_marker(entry))
+			return 0;
+		/*
+		 * If this is uffd-wp pte marker and we'd like
+		 * to unprotect it, drop it; the next page
+		 * fault will trigger without uffd trapping.
+		 */
+		if (uffd_wp_resolve) {
+			pte_clear(vma->vm_mm, addr, pte);
+			return 1;
+		}
+		return 0;
+	} else {
+		newpte = oldpte;
+	}
+
+	if (uffd_wp)
+		newpte = pte_swp_mkuffd_wp(newpte);
+	else if (uffd_wp_resolve)
+		newpte = pte_swp_clear_uffd_wp(newpte);
+
+	if (!pte_same(oldpte, newpte)) {
+		set_pte_at(vma->vm_mm, addr, pte, newpte);
+		return 1;
+	}
+	return 0;
+}
+
 static long change_pte_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
@@ -317,66 +385,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 				pages++;
 			}
 		} else  {
-			softleaf_t entry = softleaf_from_pte(oldpte);
-			pte_t newpte;
-
-			if (softleaf_is_migration_write(entry)) {
-				const struct folio *folio = softleaf_to_folio(entry);
-
-				/*
-				 * A protection check is difficult so
-				 * just be safe and disable write
-				 */
-				if (folio_test_anon(folio))
-					entry = make_readable_exclusive_migration_entry(
-							     swp_offset(entry));
-				else
-					entry = make_readable_migration_entry(swp_offset(entry));
-				newpte = swp_entry_to_pte(entry);
-				if (pte_swp_soft_dirty(oldpte))
-					newpte = pte_swp_mksoft_dirty(newpte);
-			} else if (softleaf_is_device_private_write(entry)) {
-				/*
-				 * We do not preserve soft-dirtiness. See
-				 * copy_nonpresent_pte() for explanation.
-				 */
-				entry = make_readable_device_private_entry(
-							swp_offset(entry));
-				newpte = swp_entry_to_pte(entry);
-				if (pte_swp_uffd_wp(oldpte))
-					newpte = pte_swp_mkuffd_wp(newpte);
-			} else if (softleaf_is_marker(entry)) {
-				/*
-				 * Ignore error swap entries unconditionally,
-				 * because any access should sigbus/sigsegv
-				 * anyway.
-				 */
-				if (softleaf_is_poison_marker(entry) ||
-				    softleaf_is_guard_marker(entry))
-					continue;
-				/*
-				 * If this is uffd-wp pte marker and we'd like
-				 * to unprotect it, drop it; the next page
-				 * fault will trigger without uffd trapping.
-				 */
-				if (uffd_wp_resolve) {
-					pte_clear(vma->vm_mm, addr, pte);
-					pages++;
-				}
-				continue;
-			} else {
-				newpte = oldpte;
-			}
-
-			if (uffd_wp)
-				newpte = pte_swp_mkuffd_wp(newpte);
-			else if (uffd_wp_resolve)
-				newpte = pte_swp_clear_uffd_wp(newpte);
-
-			if (!pte_same(oldpte, newpte)) {
-				set_pte_at(vma->vm_mm, addr, pte, newpte);
-				pages++;
-			}
+			pages += change_softleaf_pte(vma, addr, pte, oldpte, cp_flags);
 		}
 	} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
 	lazy_mmu_mode_disable();
-- 
2.53.0
Re: [PATCH v2 1/2] mm/mprotect: move softleaf code out of the main function
Posted by David Hildenbrand (Arm) 1 week, 2 days ago
On 3/24/26 16:43, Pedro Falcato wrote:
> Move softleaf change_pte_range code into a separate function. This makes
> the change_pte_range() function a good bit smaller, and lessens cognitive
> load when reading through the function.
> 
> Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
> Signed-off-by: Pedro Falcato <pfalcato@suse.de>
> ---
>  mm/mprotect.c | 129 +++++++++++++++++++++++++++-----------------------
>  1 file changed, 69 insertions(+), 60 deletions(-)
> 
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 9681f055b9fc..2eaf862e5734 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -211,6 +211,74 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
>  	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
>  }
>  
> +static long change_softleaf_pte(struct vm_area_struct *vma,
> +	unsigned long addr, pte_t *pte, pte_t oldpte, unsigned long cp_flags)
> +{
> +	const bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
> +	const bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
> +	softleaf_t entry = softleaf_from_pte(oldpte);
> +	pte_t newpte;
> +
> +	if (softleaf_is_migration_write(entry)) {
> +		const struct folio *folio = softleaf_to_folio(entry);
> +
> +		/*
> +		 * A protection check is difficult so
> +		 * just be safe and disable write
> +		 */
> +		if (folio_test_anon(folio))
> +			entry = make_readable_exclusive_migration_entry(
> +					     swp_offset(entry));

We can now even fit this into a single line without feeling bad :)

> +		else
> +			entry = make_readable_migration_entry(swp_offset(entry));
> +		newpte = swp_entry_to_pte(entry);
> +		if (pte_swp_soft_dirty(oldpte))
> +			newpte = pte_swp_mksoft_dirty(newpte);
> +	} else if (softleaf_is_device_private_write(entry)) {
> +		/*
> +		 * We do not preserve soft-dirtiness. See
> +		 * copy_nonpresent_pte() for explanation.
> +		 */
> +		entry = make_readable_device_private_entry(
> +					swp_offset(entry));

Same here.

> +		newpte = swp_entry_to_pte(entry);
> +		if (pte_swp_uffd_wp(oldpte))
> +			newpte = pte_swp_mkuffd_wp(newpte);
> +	} else if (softleaf_is_marker(entry)) {
> +		/*

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David