Tidy up the implementation of force_pte_mapping() to make it easier to
read and introduce the split_leaf_mapping_possible() helper to reduce
code duplication in split_kernel_leaf_mapping() and
arch_kfence_init_pool().
Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
arch/arm64/mm/mmu.c | 43 +++++++++++++++++++++++--------------------
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 652bb8c14035..2ba01dc8ef82 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -710,12 +710,26 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
static inline bool force_pte_mapping(void)
{
- bool bbml2 = system_capabilities_finalized() ?
+ const bool bbml2 = system_capabilities_finalized() ?
system_supports_bbml2_noabort() : cpu_supports_bbml2_noabort();
- return (!bbml2 && (rodata_full || arm64_kfence_can_set_direct_map() ||
- is_realm_world())) ||
- debug_pagealloc_enabled();
+ if (debug_pagealloc_enabled())
+ return true;
+ if (bbml2)
+ return false;
+ return rodata_full || arm64_kfence_can_set_direct_map() || is_realm_world();
+}
+
+static inline bool split_leaf_mapping_possible(void)
+{
+ /*
+ * !BBML2_NOABORT systems should never run into scenarios where we would
+ * have to split. So exit early and let calling code detect it and raise
+ * a warning.
+ */
+ if (!system_supports_bbml2_noabort())
+ return false;
+ return !force_pte_mapping();
}
static DEFINE_MUTEX(pgtable_split_lock);
@@ -725,22 +739,11 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
int ret;
/*
- * !BBML2_NOABORT systems should not be trying to change permissions on
- * anything that is not pte-mapped in the first place. Just return early
- * and let the permission change code raise a warning if not already
- * pte-mapped.
- */
- if (!system_supports_bbml2_noabort())
- return 0;
-
- /*
- * If the region is within a pte-mapped area, there is no need to try to
- * split. Additionally, CONFIG_DEBUG_PAGEALLOC and CONFIG_KFENCE may
- * change permissions from atomic context so for those cases (which are
- * always pte-mapped), we must not go any further because taking the
- * mutex below may sleep.
+ * Exit early if the region is within a pte-mapped area or if we can't
+ * split. For the latter case, the permission change code will raise a
+ * warning if not already pte-mapped.
*/
- if (force_pte_mapping() || is_kfence_address((void *)start))
+ if (!split_leaf_mapping_possible() || is_kfence_address((void *)start))
return 0;
/*
@@ -1039,7 +1042,7 @@ bool arch_kfence_init_pool(void)
int ret;
/* Exit early if we know the linear map is already pte-mapped. */
- if (!system_supports_bbml2_noabort() || force_pte_mapping())
+ if (!split_leaf_mapping_possible())
return true;
/* Kfence pool is already pte-mapped for the early init case. */
--
2.43.0
On 06.11.25 17:09, Ryan Roberts wrote:
> Tidy up the implementation of force_pte_mapping() to make it easier to
> read and introduce the split_leaf_mapping_possible() helper to reduce
> code duplication in split_kernel_leaf_mapping() and
> arch_kfence_init_pool().
>
> Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---

Reviewed-by: David Hildenbrand (Red Hat) <david@kernel.org>

--
Cheers

David
On 11/6/25 8:09 AM, Ryan Roberts wrote:
> Tidy up the implementation of force_pte_mapping() to make it easier to
> read and introduce the split_leaf_mapping_possible() helper to reduce
> code duplication in split_kernel_leaf_mapping() and
> arch_kfence_init_pool().
>
> Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Yang Shi <yang@os.amperecomputing.com>
Thanks,
Yang
> ---
> arch/arm64/mm/mmu.c | 43 +++++++++++++++++++++++--------------------
> 1 file changed, 23 insertions(+), 20 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 652bb8c14035..2ba01dc8ef82 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -710,12 +710,26 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
>
> static inline bool force_pte_mapping(void)
> {
> - bool bbml2 = system_capabilities_finalized() ?
> + const bool bbml2 = system_capabilities_finalized() ?
> system_supports_bbml2_noabort() : cpu_supports_bbml2_noabort();
>
> - return (!bbml2 && (rodata_full || arm64_kfence_can_set_direct_map() ||
> - is_realm_world())) ||
> - debug_pagealloc_enabled();
> + if (debug_pagealloc_enabled())
> + return true;
> + if (bbml2)
> + return false;
> + return rodata_full || arm64_kfence_can_set_direct_map() || is_realm_world();
> +}
> +
> +static inline bool split_leaf_mapping_possible(void)
> +{
> + /*
> + * !BBML2_NOABORT systems should never run into scenarios where we would
> + * have to split. So exit early and let calling code detect it and raise
> + * a warning.
> + */
> + if (!system_supports_bbml2_noabort())
> + return false;
> + return !force_pte_mapping();
> }
>
> static DEFINE_MUTEX(pgtable_split_lock);
> @@ -725,22 +739,11 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
> int ret;
>
> /*
> - * !BBML2_NOABORT systems should not be trying to change permissions on
> - * anything that is not pte-mapped in the first place. Just return early
> - * and let the permission change code raise a warning if not already
> - * pte-mapped.
> - */
> - if (!system_supports_bbml2_noabort())
> - return 0;
> -
> - /*
> - * If the region is within a pte-mapped area, there is no need to try to
> - * split. Additionally, CONFIG_DEBUG_PAGEALLOC and CONFIG_KFENCE may
> - * change permissions from atomic context so for those cases (which are
> - * always pte-mapped), we must not go any further because taking the
> - * mutex below may sleep.
> + * Exit early if the region is within a pte-mapped area or if we can't
> + * split. For the latter case, the permission change code will raise a
> + * warning if not already pte-mapped.
> */
> - if (force_pte_mapping() || is_kfence_address((void *)start))
> + if (!split_leaf_mapping_possible() || is_kfence_address((void *)start))
> return 0;
>
> /*
> @@ -1039,7 +1042,7 @@ bool arch_kfence_init_pool(void)
> int ret;
>
> /* Exit early if we know the linear map is already pte-mapped. */
> - if (!system_supports_bbml2_noabort() || force_pte_mapping())
> + if (!split_leaf_mapping_possible())
> return true;
>
> /* Kfence pool is already pte-mapped for the early init case. */
© 2016 - 2025 Red Hat, Inc.