[PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs

Posted by Yang Shi 2 months, 1 week ago
The kernel linear mapping is painted at a very early stage of system
boot, before cpufeatures have been finalized.  So the linear mapping is
determined by the capability of the boot CPU alone: if the boot CPU
supports BBML2, large block mappings will be used for the linear mapping.

But the secondary CPUs may not support BBML2.  So once cpufeatures have
been finalized on all CPUs, repaint the linear mapping if large block
mappings were used and the secondary CPUs don't support BBML2.

If the boot CPU doesn't support BBML2, or the secondary CPUs have the
same BBML2 capability as the boot CPU, repainting the linear mapping is
not needed.

Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
---
 arch/arm64/include/asm/mmu.h   |   6 +-
 arch/arm64/kernel/cpufeature.c |   8 ++
 arch/arm64/mm/mmu.c            | 173 +++++++++++++++++++++++++++------
 arch/arm64/mm/pageattr.c       |   2 +-
 arch/arm64/mm/proc.S           |  57 ++++++++---
 5 files changed, 196 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 57f4b25e6f33..9bf50e8897e2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -56,6 +56,8 @@ typedef struct {
  */
 #define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
 
+extern bool linear_map_requires_bbml2;
+
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
 	return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
@@ -71,7 +73,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
-extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end);
+extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end,
+					unsigned int flags);
+extern int linear_map_split_to_ptes(void *__unused);
 
 /*
  * This check is triggered during the early boot before the cpufeature
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 1c96016a7a41..23c01d679c40 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -85,6 +85,7 @@
 #include <asm/insn.h>
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 #include <asm/mte.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
@@ -2009,6 +2010,12 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 	return 0;
 }
 
+static void __init linear_map_maybe_split_to_ptes(void)
+{
+	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort())
+		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
+}
+
 static void __init kpti_install_ng_mappings(void)
 {
 	/* Check whether KPTI is going to be used */
@@ -3855,6 +3862,7 @@ void __init setup_system_features(void)
 {
 	setup_system_capabilities();
 
+	linear_map_maybe_split_to_ptes();
 	kpti_install_ng_mappings();
 
 	sve_setup();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f63b39613571..22f2d0869fdd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -482,11 +482,11 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
 
 #define INVALID_PHYS_ADDR	-1
 
-static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
+static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
 				       enum pgtable_type pgtable_type)
 {
 	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
-	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
+	struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);
 	phys_addr_t pa;
 
 	if (!ptdesc)
@@ -513,9 +513,16 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
 }
 
 static phys_addr_t __maybe_unused
-split_pgtable_alloc(enum pgtable_type pgtable_type)
+split_pgtable_alloc(enum pgtable_type pgtable_type, int flags)
 {
-	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
+	gfp_t gfp;
+
+	if ((flags & (NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS)) == 0)
+		gfp = GFP_PGTABLE_KERNEL & ~__GFP_ZERO;
+	else
+		gfp = GFP_ATOMIC;
+
+	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
 }
 
 static phys_addr_t __maybe_unused
@@ -523,7 +530,8 @@ pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
 {
 	phys_addr_t pa;
 
-	pa = __pgd_pgtable_alloc(&init_mm, pgtable_type);
+	pa = __pgd_pgtable_alloc(&init_mm, GFP_PGTABLE_KERNEL & ~__GFP_ZERO,
+				 pgtable_type);
 	BUG_ON(pa == INVALID_PHYS_ADDR);
 	return pa;
 }
@@ -533,7 +541,8 @@ pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
 {
 	phys_addr_t pa;
 
-	pa = __pgd_pgtable_alloc(NULL, pgtable_type);
+	pa = __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL & ~__GFP_ZERO,
+				 pgtable_type);
 	BUG_ON(pa == INVALID_PHYS_ADDR);
 	return pa;
 }
@@ -572,7 +581,8 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 
 static DEFINE_MUTEX(pgtable_split_lock);
 
-static int split_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end)
+static int split_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
+			  unsigned int flags)
 {
 	pte_t *ptep;
 	unsigned long next;
@@ -586,14 +596,16 @@ static int split_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end)
 
 		nr = 0;
 		next = pte_cont_addr_end(addr, end);
-		if (next < end)
+		if (next < end &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
 			nr = max(nr, ((end - next) / CONT_PTE_SIZE));
 		span = nr * CONT_PTE_SIZE;
 
 		_ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
 		ptep += pte_index(next) - pte_index(addr) + nr * CONT_PTES;
 
-		if (((addr | next) & ~CONT_PTE_MASK) == 0)
+		if (((addr | next) & ~CONT_PTE_MASK) == 0 &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
 			continue;
 
 		if (!pte_cont(__ptep_get(_ptep)))
@@ -606,7 +618,8 @@ static int split_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end)
 	return 0;
 }
 
-static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
+static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
+		     unsigned int flags)
 {
 	unsigned long next;
 	unsigned int nr;
@@ -618,11 +631,13 @@ static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
 
 		nr = 1;
 		next = pmd_addr_end(addr, end);
-		if (next < end)
+		if (next < end &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0)
 			nr = max(nr, ((end - next) / PMD_SIZE));
 		span = (nr - 1) * PMD_SIZE;
 
-		if (((addr | next) & ~PMD_MASK) == 0)
+		if (((addr | next) & ~PMD_MASK) == 0 &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0)
 			continue;
 
 		pmd = pmdp_get(pmdp);
@@ -634,7 +649,7 @@ static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
 			unsigned long pfn = pmd_pfn(pmd);
 			pgprot_t prot = pmd_pgprot(pmd);
 
-			pte_phys = split_pgtable_alloc(TABLE_PTE);
+			pte_phys = split_pgtable_alloc(TABLE_PTE, flags);
 			if (pte_phys == INVALID_PHYS_ADDR)
 				return -ENOMEM;
 
@@ -643,7 +658,8 @@ static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
 
 			prot = __pgprot((pgprot_val(prot) & ~PTE_TYPE_MASK) |
 					PTE_TYPE_PAGE);
-			prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+			if ((flags & NO_CONT_MAPPINGS) == 0)
+				prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 			ptep = (pte_t *)phys_to_virt(pte_phys);
 			for (int i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
 				__set_pte(ptep, pfn_pte(pfn, prot));
@@ -653,7 +669,7 @@ static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
 			__pmd_populate(pmdp, pte_phys, pmdval);
 		}
 
-		ret = split_cont_pte(pmdp, addr, next);
+		ret = split_cont_pte(pmdp, addr, next, flags);
 		if (ret)
 			break;
 	} while (pmdp += nr, addr = next + span, addr != end);
@@ -661,7 +677,8 @@ static int split_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
 	return ret;
 }
 
-static int split_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end)
+static int split_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
+			  unsigned int flags)
 {
 	pmd_t *pmdp;
 	unsigned long next;
@@ -676,11 +693,13 @@ static int split_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end)
 
 		nr = 0;
 		next = pmd_cont_addr_end(addr, end);
-		if (next < end)
+		if (next < end &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
 			nr = max(nr, ((end - next) / CONT_PMD_SIZE));
 		span = nr * CONT_PMD_SIZE;
 
-		if (((addr | next) & ~CONT_PMD_MASK) == 0) {
+		if (((addr | next) & ~CONT_PMD_MASK) == 0 &&
+		    (flags & NO_CONT_MAPPINGS) == 0) {
 			pmdp += pmd_index(next) - pmd_index(addr) +
 				nr * CONT_PMDS;
 			continue;
@@ -694,7 +713,7 @@ static int split_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end)
 			set_pmd(_pmdp, pmd_mknoncont(pmdp_get(_pmdp)));
 
 split:
-		ret = split_pmd(pmdp, addr, next);
+		ret = split_pmd(pmdp, addr, next, flags);
 		if (ret)
 			break;
 
@@ -704,7 +723,8 @@ static int split_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end)
 	return ret;
 }
 
-static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
+static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
+		     unsigned int flags)
 {
 	pud_t *pudp;
 	unsigned long next;
@@ -719,11 +739,13 @@ static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
 
 		nr = 1;
 		next = pud_addr_end(addr, end);
-		if (next < end)
+		if (next < end &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0)
 			nr = max(nr, ((end - next) / PUD_SIZE));
 		span = (nr - 1) * PUD_SIZE;
 
-		if (((addr | next) & ~PUD_MASK) == 0)
+		if (((addr | next) & ~PUD_MASK) == 0 &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0)
 			continue;
 
 		pud = pudp_get(pudp);
@@ -736,7 +758,7 @@ static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
 			pgprot_t prot = pud_pgprot(pud);
 			unsigned int step = PMD_SIZE >> PAGE_SHIFT;
 
-			pmd_phys = split_pgtable_alloc(TABLE_PMD);
+			pmd_phys = split_pgtable_alloc(TABLE_PMD, flags);
 			if (pmd_phys == INVALID_PHYS_ADDR)
 				return -ENOMEM;
 
@@ -745,7 +767,8 @@ static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
 
 			prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) |
 					PMD_TYPE_SECT);
-			prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+			if ((flags & NO_CONT_MAPPINGS) == 0)
+				prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 			pmdp = (pmd_t *)phys_to_virt(pmd_phys);
 			for (int i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
 				set_pmd(pmdp, pfn_pmd(pfn, prot));
@@ -757,7 +780,7 @@ static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
 			__pud_populate(pudp, pmd_phys, pudval);
 		}
 
-		ret = split_cont_pmd(pudp, addr, next);
+		ret = split_cont_pmd(pudp, addr, next, flags);
 		if (ret)
 			break;
 	} while (pudp += nr, addr = next + span, addr != end);
@@ -765,7 +788,8 @@ static int split_pud(p4d_t *p4dp, unsigned long addr, unsigned long end)
 	return ret;
 }
 
-static int split_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end)
+static int split_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
+		     unsigned int flags)
 {
 	p4d_t *p4dp;
 	unsigned long next;
@@ -776,7 +800,7 @@ static int split_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end)
 	do {
 		next = p4d_addr_end(addr, end);
 
-		ret = split_pud(p4dp, addr, next);
+		ret = split_pud(p4dp, addr, next, flags);
 		if (ret)
 			break;
 	} while (p4dp++, addr = next, addr != end);
@@ -784,14 +808,15 @@ static int split_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end)
 	return ret;
 }
 
-static int split_pgd(pgd_t *pgdp, unsigned long addr, unsigned long end)
+static int split_pgd(pgd_t *pgdp, unsigned long addr, unsigned long end,
+		     unsigned int flags)
 {
 	unsigned long next;
 	int ret = 0;
 
 	do {
 		next = pgd_addr_end(addr, end);
-		ret = split_p4d(pgdp, addr, next);
+		ret = split_p4d(pgdp, addr, next, flags);
 		if (ret)
 			break;
 	} while (pgdp++, addr = next, addr != end);
@@ -799,7 +824,8 @@ static int split_pgd(pgd_t *pgdp, unsigned long addr, unsigned long end)
 	return ret;
 }
 
-int split_kernel_pgtable_mapping(unsigned long start, unsigned long end)
+int split_kernel_pgtable_mapping(unsigned long start, unsigned long end,
+				 unsigned int flags)
 {
 	int ret;
 
@@ -811,7 +837,7 @@ int split_kernel_pgtable_mapping(unsigned long start, unsigned long end)
 
 	mutex_lock(&pgtable_split_lock);
 	arch_enter_lazy_mmu_mode();
-	ret = split_pgd(pgd_offset_k(start), start, end);
+	ret = split_pgd(pgd_offset_k(start), start, end, flags);
 	arch_leave_lazy_mmu_mode();
 	mutex_unlock(&pgtable_split_lock);
 
@@ -851,6 +877,75 @@ void __init mark_linear_text_alias_ro(void)
 			    PAGE_KERNEL_RO);
 }
 
+extern u32 repaint_done;
+
+int __init linear_map_split_to_ptes(void *__unused)
+{
+	typedef void (repaint_wait_fn)(void);
+	extern repaint_wait_fn bbml2_wait_for_repainting;
+	repaint_wait_fn *wait_fn;
+
+	int cpu = smp_processor_id();
+
+	wait_fn = (void *)__pa_symbol(bbml2_wait_for_repainting);
+
+	/*
+	 * Repainting can only be run on CPU 0 because CPU 0 is the only
+	 * CPU we can be sure supports BBML2.
+	 */
+	if (!cpu) {
+		phys_addr_t kernel_start = __pa_symbol(_stext);
+		phys_addr_t kernel_end = __pa_symbol(__init_begin);
+		phys_addr_t start, end;
+		unsigned long vstart, vend;
+		int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+		u64 i;
+		int ret;
+
+		/*
+	 * Wait for all secondary CPUs to get prepared for repainting
+		 * the linear mapping.
+		 */
+		smp_cond_load_acquire(&repaint_done, VAL == num_online_cpus());
+
+		memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
+		/* Split the whole linear mapping */
+		for_each_mem_range(i, &start, &end) {
+			if (start >= end)
+				return -EINVAL;
+
+			vstart = __phys_to_virt(start);
+			vend = __phys_to_virt(end);
+			ret = split_kernel_pgtable_mapping(vstart, vend, flags);
+			if (ret)
+				panic("Failed to split linear mappings\n");
+
+			flush_tlb_kernel_range(vstart, vend);
+		}
+		memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+
+		/*
+		 * Relies on the dsb in flush_tlb_kernel_range() to keep this
+		 * store from being reordered before the splits above.
+		 */
+		WRITE_ONCE(repaint_done, 0);
+	} else {
+		/*
+		 * The secondary CPUs can't run in the same address space
+		 * as CPU 0 because accessing the linear mapping while
+		 * CPU 0 is repainting it is not safe.
+		 *
+		 * Let the secondary CPUs busy-loop in the idmap address
+		 * space while repainting is ongoing.
+		 */
+		cpu_install_idmap();
+		wait_fn();
+		cpu_uninstall_idmap();
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_KFENCE
 
 bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
@@ -1079,7 +1174,8 @@ void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
 		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
 
 static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
-	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
+	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
+	  bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
 
 static void __init create_idmap(void)
 {
@@ -1104,6 +1200,19 @@ static void __init create_idmap(void)
 			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
 			       __phys_to_virt(ptep) - ptep);
 	}
+
+	/*
+	 * Set up an idmap mapping for the repaint_done flag.  It will be
+	 * used if repainting the linear mapping is needed later.
+	 */
+	if (linear_map_requires_bbml2) {
+		u64 pa = __pa_symbol(&repaint_done);
+		ptep = __pa_symbol(bbml2_ptes);
+
+		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
+			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
+			       __phys_to_virt(ptep) - ptep);
+	}
 }
 
 void __init paging_init(void)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 6566aa9d8abb..4471d7e510a1 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -140,7 +140,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;
 
-	ret = split_kernel_pgtable_mapping(start, start + size);
+	ret = split_kernel_pgtable_mapping(start, start + size, 0);
 	if (WARN_ON_ONCE(ret))
 		return ret;
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 80d470aa469d..f0f9c49a4466 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -239,6 +239,25 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
 	dsb	nshst
 	.endm
 
+	.macro wait_for_boot_cpu, tmp1, tmp2, tmp3, tmp4
+	/* Increment the flag to let the boot CPU know we're ready */
+1:	ldxr	\tmp3, [\tmp2]
+	add	\tmp3, \tmp3, #1
+	stxr	\tmp4, \tmp3, [\tmp2]
+	cbnz	\tmp4, 1b
+
+	/* Wait for the boot CPU to finish its job */
+	sevl
+1:	wfe
+	ldxr	\tmp3, [\tmp2]
+	cbnz	\tmp3, 1b
+
+	/* All done, act like nothing happened */
+	msr	ttbr1_el1, \tmp1
+	isb
+	ret
+	.endm
+
 /*
  * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
  *				   unsigned long temp_pte_va)
@@ -416,29 +435,35 @@ alternative_else_nop_endif
 __idmap_kpti_secondary:
 	/* Uninstall swapper before surgery begins */
 	__idmap_cpu_set_reserved_ttbr1 x16, x17
+	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
 
-	/* Increment the flag to let the boot CPU we're ready */
-1:	ldxr	w16, [flag_ptr]
-	add	w16, w16, #1
-	stxr	w17, w16, [flag_ptr]
-	cbnz	w17, 1b
+	.unreq	swapper_ttb
+	.unreq	flag_ptr
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
+	.popsection
+#endif
 
-	/* Wait for the boot CPU to finish messing around with swapper */
-	sevl
-1:	wfe
-	ldxr	w16, [flag_ptr]
-	cbnz	w16, 1b
+/*
+ * Wait until repainting is done.  Runs on secondary CPUs
+ * only.
+ */
+	.pushsection	".data", "aw", %progbits
+SYM_DATA(repaint_done, .long 1)
+	.popsection
 
-	/* All done, act like nothing happened */
-	msr	ttbr1_el1, swapper_ttb
-	isb
-	ret
+	.pushsection ".idmap.text", "a"
+SYM_TYPED_FUNC_START(bbml2_wait_for_repainting)
+	swapper_ttb	.req	x0
+	flag_ptr	.req	x1
+	mrs     swapper_ttb, ttbr1_el1
+	adr_l   flag_ptr, repaint_done
+	__idmap_cpu_set_reserved_ttbr1 x16, x17
+	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
 
 	.unreq	swapper_ttb
 	.unreq	flag_ptr
-SYM_FUNC_END(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(bbml2_wait_for_repainting)
 	.popsection
-#endif
 
 /*
  *	__cpu_setup
-- 
2.50.0
Re: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
Posted by Ryan Roberts 2 months ago
On 24/07/2025 23:11, Yang Shi wrote:
> The kernel linear mapping is painted in very early stage of system boot.
> The cpufeature has not been finalized yet at this point.  So the linear
> mapping is determined by the capability of boot CPU.  If the boot CPU
> supports BBML2, large block mapping will be used for linear mapping.
> 
> But the secondary CPUs may not support BBML2, so repaint the linear mapping
> if large block mapping is used and the secondary CPUs don't support BBML2
> once cpufeature is finalized on all CPUs.
> 
> If the boot CPU doesn't support BBML2 or the secondary CPUs have the
> same BBML2 capability with the boot CPU, repainting the linear mapping
> is not needed.
> 
> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
> ---
>  arch/arm64/include/asm/mmu.h   |   6 +-
>  arch/arm64/kernel/cpufeature.c |   8 ++
>  arch/arm64/mm/mmu.c            | 173 +++++++++++++++++++++++++++------
>  arch/arm64/mm/pageattr.c       |   2 +-
>  arch/arm64/mm/proc.S           |  57 ++++++++---
>  5 files changed, 196 insertions(+), 50 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 57f4b25e6f33..9bf50e8897e2 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -56,6 +56,8 @@ typedef struct {
>   */
>  #define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
>  
> +extern bool linear_map_requires_bbml2;
> +
>  static inline bool arm64_kernel_unmapped_at_el0(void)
>  {
>  	return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
> @@ -71,7 +73,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  			       pgprot_t prot, bool page_mappings_only);
>  extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>  extern void mark_linear_text_alias_ro(void);
> -extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end);
> +extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end,
> +					unsigned int flags);
> +extern int linear_map_split_to_ptes(void *__unused);
>  
>  /*
>   * This check is triggered during the early boot before the cpufeature
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 1c96016a7a41..23c01d679c40 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -85,6 +85,7 @@
>  #include <asm/insn.h>
>  #include <asm/kvm_host.h>
>  #include <asm/mmu_context.h>
> +#include <asm/mmu.h>
>  #include <asm/mte.h>
>  #include <asm/hypervisor.h>
>  #include <asm/processor.h>
> @@ -2009,6 +2010,12 @@ static int __init __kpti_install_ng_mappings(void *__unused)
>  	return 0;
>  }
>  
> +static void __init linear_map_maybe_split_to_ptes(void)
> +{
> +	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort())
> +		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
> +}
> +
>  static void __init kpti_install_ng_mappings(void)
>  {
>  	/* Check whether KPTI is going to be used */
> @@ -3855,6 +3862,7 @@ void __init setup_system_features(void)
>  {
>  	setup_system_capabilities();
>  
> +	linear_map_maybe_split_to_ptes();
>  	kpti_install_ng_mappings();
>  
>  	sve_setup();
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index f63b39613571..22f2d0869fdd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -482,11 +482,11 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
>  
>  #define INVALID_PHYS_ADDR	-1
>  

[...]

I'll review the actual walker separately (I've run out of time today).


>  
> +extern u32 repaint_done;
> +
> +int __init linear_map_split_to_ptes(void *__unused)
> +{
> +	typedef void (repaint_wait_fn)(void);
> +	extern repaint_wait_fn bbml2_wait_for_repainting;
> +	repaint_wait_fn *wait_fn;
> +
> +	int cpu = smp_processor_id();
> +
> +	wait_fn = (void *)__pa_symbol(bbml2_wait_for_repainting);
> +
> +	/*
> +	 * Repainting just can be run on CPU 0 because we just can be sure
> +	 * CPU 0 supports BBML2.
> +	 */
> +	if (!cpu) {
> +		phys_addr_t kernel_start = __pa_symbol(_stext);
> +		phys_addr_t kernel_end = __pa_symbol(__init_begin);
> +		phys_addr_t start, end;
> +		unsigned long vstart, vend;
> +		int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> +		u64 i;
> +		int ret;
> +
> +		/*
> +		 * Wait for all secondary CPUs get prepared for repainting
> +		 * the linear mapping.
> +		 */
> +		smp_cond_load_acquire(&repaint_done, VAL == num_online_cpus());

Is this correct? I would have thought the primary is waiting for the
secondaries, so "VAL == num_online_cpus() - 1" ?

> +
> +		memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
> +		/* Split the whole linear mapping */
> +		for_each_mem_range(i, &start, &end) {

I think I asked this in the last round; but I just want to double check;
memblock is definitely still valid here and we are definitely going to get
exactly the same regions out as we did in map_mem()? I wonder if it's possible
between then and now that some other component has reserved some memory? In that
case we wouldn't walk that region?

Perhaps it would be safer (and simpler) to just walk all of [PAGE_OFFSET,
_stext) and [__init_begin, PAGE_END) and ignore the holes?
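
i.e. something like this (untested, just to illustrate; lm_alias() to
get the linear map addresses of the kernel image symbols):

---8<---
	unsigned long kstart = (unsigned long)lm_alias(_stext);
	unsigned long kend = (unsigned long)lm_alias(__init_begin);

	/* Split everything below and above the kernel image alias. */
	split_kernel_pgtable_mapping(PAGE_OFFSET, kstart, flags);
	flush_tlb_kernel_range(PAGE_OFFSET, kstart);
	split_kernel_pgtable_mapping(kend, PAGE_END, flags);
	flush_tlb_kernel_range(kend, PAGE_END);
---8<---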

> +			if (start >= end)
> +				return -EINVAL;
> +
> +			vstart = __phys_to_virt(start);
> +			vend = __phys_to_virt(end);
> +			ret = split_kernel_pgtable_mapping(vstart, vend, flags);
> +			if (ret)
> +				panic("Failed to split linear mappings\n");
> +
> +			flush_tlb_kernel_range(vstart, vend);
> +		}
> +		memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
> +
> +		/*
> +		 * Relies on dsb in flush_tlb_kernel_range() to avoid
> +		 * reordering before any page table split operations.
> +		 */
> +		WRITE_ONCE(repaint_done, 0);
> +	} else {
> +		/*
> +		 * The secondary CPUs can't run in the same address space
> +		 * with CPU 0 because accessing the linear mapping address
> +		 * when CPU 0 is repainting it is not safe.
> +		 *
> +		 * Let the secondary CPUs run busy loop in idmap address
> +		 * space when repainting is ongoing.
> +		 */
> +		cpu_install_idmap();
> +		wait_fn();
> +		cpu_uninstall_idmap();
> +	}
> +
> +	return 0;
> +}
> +
>  #ifdef CONFIG_KFENCE
>  
>  bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
> @@ -1079,7 +1174,8 @@ void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
>  		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
>  
>  static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
> -	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
> +	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
> +	  bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
>  
>  static void __init create_idmap(void)
>  {
> @@ -1104,6 +1200,19 @@ static void __init create_idmap(void)
>  			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
>  			       __phys_to_virt(ptep) - ptep);
>  	}
> +
> +	/*
> +	 * Setup idmap mapping for repaint_done flag.  It will be used if
> +	 * repainting the linear mapping is needed later.
> +	 */
> +	if (linear_map_requires_bbml2) {
> +		u64 pa = __pa_symbol(&repaint_done);
> +		ptep = __pa_symbol(bbml2_ptes);
> +
> +		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
> +			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
> +			       __phys_to_virt(ptep) - ptep);
> +	}
>  }
>  
>  void __init paging_init(void)
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 6566aa9d8abb..4471d7e510a1 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -140,7 +140,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
>  	data.set_mask = set_mask;
>  	data.clear_mask = clear_mask;
>  
> -	ret = split_kernel_pgtable_mapping(start, start + size);
> +	ret = split_kernel_pgtable_mapping(start, start + size, 0);
>  	if (WARN_ON_ONCE(ret))
>  		return ret;
>  
> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
> index 80d470aa469d..f0f9c49a4466 100644
> --- a/arch/arm64/mm/proc.S
> +++ b/arch/arm64/mm/proc.S
> @@ -239,6 +239,25 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
>  	dsb	nshst
>  	.endm
>  
> +	.macro wait_for_boot_cpu, tmp1, tmp2, tmp3, tmp4
> +	/* Increment the flag to let the boot CPU know we're ready */
> +1:	ldxr	\tmp3, [\tmp2]
> +	add	\tmp3, \tmp3, #1
> +	stxr	\tmp4, \tmp3, [\tmp2]
> +	cbnz	\tmp4, 1b
> +
> +	/* Wait for the boot CPU to finish its job */
> +	sevl
> +1:	wfe
> +	ldxr	\tmp3, [\tmp2]
> +	cbnz	\tmp3, 1b
> +
> +	/* All done, act like nothing happened */
> +	msr	ttbr1_el1, \tmp1
> +	isb
> +	ret
> +	.endm

You've defined the macro within "#ifdef CONFIG_UNMAP_KERNEL_AT_EL0" but then
need to use it outside of that scope.

But I don't think this needs to be a macro; I think it would be better as a
function (as I suggested in the last round). Then the text only needs to appear
once in the image and it can be used from both places (see below).

> +
>  /*
>   * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
>   *				   unsigned long temp_pte_va)
> @@ -416,29 +435,35 @@ alternative_else_nop_endif
>  __idmap_kpti_secondary:
>  	/* Uninstall swapper before surgery begins */
>  	__idmap_cpu_set_reserved_ttbr1 x16, x17
> +	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
>  
> -	/* Increment the flag to let the boot CPU we're ready */
> -1:	ldxr	w16, [flag_ptr]
> -	add	w16, w16, #1
> -	stxr	w17, w16, [flag_ptr]
> -	cbnz	w17, 1b
> +	.unreq	swapper_ttb
> +	.unreq	flag_ptr
> +SYM_FUNC_END(idmap_kpti_install_ng_mappings)
> +	.popsection
> +#endif
>  
> -	/* Wait for the boot CPU to finish messing around with swapper */
> -	sevl
> -1:	wfe
> -	ldxr	w16, [flag_ptr]
> -	cbnz	w16, 1b
> +/*
> + * Wait for repainting is done. Run on secondary CPUs
> + * only.
> + */
> +	.pushsection	".data", "aw", %progbits
> +SYM_DATA(repaint_done, .long 1)
> +	.popsection
>  
> -	/* All done, act like nothing happened */
> -	msr	ttbr1_el1, swapper_ttb
> -	isb
> -	ret
> +	.pushsection ".idmap.text", "a"
> +SYM_TYPED_FUNC_START(bbml2_wait_for_repainting)
> +	swapper_ttb	.req	x0
> +	flag_ptr	.req	x1
> +	mrs     swapper_ttb, ttbr1_el1
> +	adr_l   flag_ptr, repaint_done
> +	__idmap_cpu_set_reserved_ttbr1 x16, x17
> +	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
>  
>  	.unreq	swapper_ttb
>  	.unreq	flag_ptr
> -SYM_FUNC_END(idmap_kpti_install_ng_mappings)
> +SYM_FUNC_END(bbml2_wait_for_repainting)
>  	.popsection
> -#endif

How about this instead?

---8<---
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8c75965afc9e..a116b2b8ad59 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -416,8 +416,30 @@ alternative_else_nop_endif
 __idmap_kpti_secondary:
 	/* Uninstall swapper before surgery begins */
 	__idmap_cpu_set_reserved_ttbr1 x16, x17
+	b secondary_cpu_wait
+
+	.unreq	swapper_ttb
+	.unreq	flag_ptr
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
+	.popsection
+#endif
+
+	.pushsection	".data", "aw", %progbits
+SYM_DATA(repaint_done, .long 1)
+	.popsection
+
+	.pushsection ".idmap.text", "a"
+SYM_TYPED_FUNC_START(bbml2_wait_for_repainting)
+	/* Must be same registers as in idmap_kpti_install_ng_mappings */
+	swapper_ttb	.req	x3
+	flag_ptr	.req	x4
+
+	mrs     swapper_ttb, ttbr1_el1
+	adr_l   flag_ptr, repaint_done
+	__idmap_cpu_set_reserved_ttbr1 x16, x17

 	/* Increment the flag to let the boot CPU we're ready */
+secondary_cpu_wait:
 1:	ldxr	w16, [flag_ptr]
 	add	w16, w16, #1
 	stxr	w17, w16, [flag_ptr]
@@ -436,9 +458,8 @@ __idmap_kpti_secondary:

 	.unreq	swapper_ttb
 	.unreq	flag_ptr
-SYM_FUNC_END(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(bbml2_wait_for_repainting)
 	.popsection
-#endif

 /*
  *	__cpu_setup
---8<---

Thanks,
Ryan
Re: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
Posted by Yang Shi 2 months ago

On 8/1/25 9:14 AM, Ryan Roberts wrote:
> On 24/07/2025 23:11, Yang Shi wrote:
>> The kernel linear mapping is painted in very early stage of system boot.
>> The cpufeature has not been finalized yet at this point.  So the linear
>> mapping is determined by the capability of boot CPU.  If the boot CPU
>> supports BBML2, large block mapping will be used for linear mapping.
>>
>> But the secondary CPUs may not support BBML2, so repaint the linear mapping
>> if large block mapping is used and the secondary CPUs don't support BBML2
>> once cpufeature is finalized on all CPUs.
>>
>> If the boot CPU doesn't support BBML2 or the secondary CPUs have the
>> same BBML2 capability with the boot CPU, repainting the linear mapping
>> is not needed.
>>
>> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
>> ---
>>   arch/arm64/include/asm/mmu.h   |   6 +-
>>   arch/arm64/kernel/cpufeature.c |   8 ++
>>   arch/arm64/mm/mmu.c            | 173 +++++++++++++++++++++++++++------
>>   arch/arm64/mm/pageattr.c       |   2 +-
>>   arch/arm64/mm/proc.S           |  57 ++++++++---
>>   5 files changed, 196 insertions(+), 50 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
>> index 57f4b25e6f33..9bf50e8897e2 100644
>> --- a/arch/arm64/include/asm/mmu.h
>> +++ b/arch/arm64/include/asm/mmu.h
>> @@ -56,6 +56,8 @@ typedef struct {
>>    */
>>   #define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
>>   
>> +extern bool linear_map_requires_bbml2;
>> +
>>   static inline bool arm64_kernel_unmapped_at_el0(void)
>>   {
>>   	return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
>> @@ -71,7 +73,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>>   			       pgprot_t prot, bool page_mappings_only);
>>   extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>>   extern void mark_linear_text_alias_ro(void);
>> -extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end);
>> +extern int split_kernel_pgtable_mapping(unsigned long start, unsigned long end,
>> +					unsigned int flags);
>> +extern int linear_map_split_to_ptes(void *__unused);
>>   
>>   /*
>>    * This check is triggered during the early boot before the cpufeature
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index 1c96016a7a41..23c01d679c40 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -85,6 +85,7 @@
>>   #include <asm/insn.h>
>>   #include <asm/kvm_host.h>
>>   #include <asm/mmu_context.h>
>> +#include <asm/mmu.h>
>>   #include <asm/mte.h>
>>   #include <asm/hypervisor.h>
>>   #include <asm/processor.h>
>> @@ -2009,6 +2010,12 @@ static int __init __kpti_install_ng_mappings(void *__unused)
>>   	return 0;
>>   }
>>   
>> +static void __init linear_map_maybe_split_to_ptes(void)
>> +{
>> +	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort())
>> +		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
>> +}
>> +
>>   static void __init kpti_install_ng_mappings(void)
>>   {
>>   	/* Check whether KPTI is going to be used */
>> @@ -3855,6 +3862,7 @@ void __init setup_system_features(void)
>>   {
>>   	setup_system_capabilities();
>>   
>> +	linear_map_maybe_split_to_ptes();
>>   	kpti_install_ng_mappings();
>>   
>>   	sve_setup();
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index f63b39613571..22f2d0869fdd 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -482,11 +482,11 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
>>   
>>   #define INVALID_PHYS_ADDR	-1
>>   
> [...]
>
> I'll review the actual walker separately (I've run out of time today).
>
>
>>   
>> +extern u32 repaint_done;
>> +
>> +int __init linear_map_split_to_ptes(void *__unused)
>> +{
>> +	typedef void (repaint_wait_fn)(void);
>> +	extern repaint_wait_fn bbml2_wait_for_repainting;
>> +	repaint_wait_fn *wait_fn;
>> +
>> +	int cpu = smp_processor_id();
>> +
>> +	wait_fn = (void *)__pa_symbol(bbml2_wait_for_repainting);
>> +
>> +	/*
>> +	 * Repainting just can be run on CPU 0 because we just can be sure
>> +	 * CPU 0 supports BBML2.
>> +	 */
>> +	if (!cpu) {
>> +		phys_addr_t kernel_start = __pa_symbol(_stext);
>> +		phys_addr_t kernel_end = __pa_symbol(__init_begin);
>> +		phys_addr_t start, end;
>> +		unsigned long vstart, vend;
>> +		int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>> +		u64 i;
>> +		int ret;
>> +
>> +		/*
>> +		 * Wait for all secondary CPUs get prepared for repainting
>> +		 * the linear mapping.
>> +		 */
>> +		smp_cond_load_acquire(&repaint_done, VAL == num_online_cpus());
> Is this correct? I would have thought the primary is waiting for the
> secondaries, so "VAL == num_online_cpus() - 1" ?

It is correct because repaint_done is initialized to 1.
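
In other words, roughly (pseudocode; the initial value of 1 counts
CPU 0 implicitly):

---8<---
	/* each secondary CPU, spinning in the idmap */
	repaint_done++;				/* ldxr/add/stxr loop */
	wfe-loop while (repaint_done != 0);	/* wait for the boot CPU */

	/* boot CPU */
	smp_cond_load_acquire(&repaint_done,
			      VAL == num_online_cpus());  /* 1 + (N - 1) */
	/* ... repaint the linear mapping ... */
	WRITE_ONCE(repaint_done, 0);		/* release the secondaries */
---8<---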

>
>> +
>> +		memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
>> +		/* Split the whole linear mapping */
>> +		for_each_mem_range(i, &start, &end) {
> I think I asked this in the last round; but I just want to double check;
> memblock is definitely still valid here and we are definitely going to get
> exactly the same regions out as we did in map_mem()? I wonder if it's possible
> between then and now that some other component has reserved some memory? In that
> case we wouldn't walk that region?

I think it is kept unchanged. The ptdump shows no block or cont mappings
for the linear mapping if I force a repaint of the page table.

>
> Perhaps it would be safer (and simpler) to just walk all of [PAGE_OFFSET,
> _stext) and [__init_begin, PAGE_END) and ignore the holes?

This may walk some holes, particularly on multi-socket machines. It
doesn't have a correctness issue, it's just not very efficient.

>
>> +			if (start >= end)
>> +				return -EINVAL;
>> +
>> +			vstart = __phys_to_virt(start);
>> +			vend = __phys_to_virt(end);
>> +			ret = split_kernel_pgtable_mapping(vstart, vend, flags);
>> +			if (ret)
>> +				panic("Failed to split linear mappings\n");
>> +
>> +			flush_tlb_kernel_range(vstart, vend);
>> +		}
>> +		memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
>> +
>> +		/*
>> +		 * Relies on dsb in flush_tlb_kernel_range() to avoid
>> +		 * reordering before any page table split operations.
>> +		 */
>> +		WRITE_ONCE(repaint_done, 0);
>> +	} else {
>> +		/*
>> +		 * The secondary CPUs can't run in the same address space
>> +		 * with CPU 0 because accessing the linear mapping address
>> +		 * when CPU 0 is repainting it is not safe.
>> +		 *
>> +		 * Let the secondary CPUs run busy loop in idmap address
>> +		 * space when repainting is ongoing.
>> +		 */
>> +		cpu_install_idmap();
>> +		wait_fn();
>> +		cpu_uninstall_idmap();
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>>   #ifdef CONFIG_KFENCE
>>   
>>   bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
>> @@ -1079,7 +1174,8 @@ void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
>>   		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
>>   
>>   static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
>> -	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
>> +	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
>> +	  bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
>>   
>>   static void __init create_idmap(void)
>>   {
>> @@ -1104,6 +1200,19 @@ static void __init create_idmap(void)
>>   			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
>>   			       __phys_to_virt(ptep) - ptep);
>>   	}
>> +
>> +	/*
>> +	 * Setup idmap mapping for repaint_done flag.  It will be used if
>> +	 * repainting the linear mapping is needed later.
>> +	 */
>> +	if (linear_map_requires_bbml2) {
>> +		u64 pa = __pa_symbol(&repaint_done);
>> +		ptep = __pa_symbol(bbml2_ptes);
>> +
>> +		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
>> +			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
>> +			       __phys_to_virt(ptep) - ptep);
>> +	}
>>   }
>>   
>>   void __init paging_init(void)
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index 6566aa9d8abb..4471d7e510a1 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -140,7 +140,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
>>   	data.set_mask = set_mask;
>>   	data.clear_mask = clear_mask;
>>   
>> -	ret = split_kernel_pgtable_mapping(start, start + size);
>> +	ret = split_kernel_pgtable_mapping(start, start + size, 0);
>>   	if (WARN_ON_ONCE(ret))
>>   		return ret;
>>   
>> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
>> index 80d470aa469d..f0f9c49a4466 100644
>> --- a/arch/arm64/mm/proc.S
>> +++ b/arch/arm64/mm/proc.S
>> @@ -239,6 +239,25 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
>>   	dsb	nshst
>>   	.endm
>>   
>> +	.macro wait_for_boot_cpu, tmp1, tmp2, tmp3, tmp4
>> +	/* Increment the flag to let the boot CPU know we're ready */
>> +1:	ldxr	\tmp3, [\tmp2]
>> +	add	\tmp3, \tmp3, #1
>> +	stxr	\tmp4, \tmp3, [\tmp2]
>> +	cbnz	\tmp4, 1b
>> +
>> +	/* Wait for the boot CPU to finish its job */
>> +	sevl
>> +1:	wfe
>> +	ldxr	\tmp3, [\tmp2]
>> +	cbnz	\tmp3, 1b
>> +
>> +	/* All done, act like nothing happened */
>> +	msr	ttbr1_el1, \tmp1
>> +	isb
>> +	ret
>> +	.endm
> You've defined the macro within "#ifdef CONFIG_UNMAP_KERNEL_AT_EL0" but then
> need to use it outside of that scope.
>
> But I don't think this needs to be a macro; I think it would be better as a
> function (as I suggested in the last round). Then the text only needs to appear
> once in the image and it can be used from both places (see below).
>
>> +
>>   /*
>>    * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
>>    *				   unsigned long temp_pte_va)
>> @@ -416,29 +435,35 @@ alternative_else_nop_endif
>>   __idmap_kpti_secondary:
>>   	/* Uninstall swapper before surgery begins */
>>   	__idmap_cpu_set_reserved_ttbr1 x16, x17
>> +	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
>>   
>> -	/* Increment the flag to let the boot CPU we're ready */
>> -1:	ldxr	w16, [flag_ptr]
>> -	add	w16, w16, #1
>> -	stxr	w17, w16, [flag_ptr]
>> -	cbnz	w17, 1b
>> +	.unreq	swapper_ttb
>> +	.unreq	flag_ptr
>> +SYM_FUNC_END(idmap_kpti_install_ng_mappings)
>> +	.popsection
>> +#endif
>>   
>> -	/* Wait for the boot CPU to finish messing around with swapper */
>> -	sevl
>> -1:	wfe
>> -	ldxr	w16, [flag_ptr]
>> -	cbnz	w16, 1b
>> +/*
>> + * Wait for repainting is done. Run on secondary CPUs
>> + * only.
>> + */
>> +	.pushsection	".data", "aw", %progbits
>> +SYM_DATA(repaint_done, .long 1)
>> +	.popsection
>>   
>> -	/* All done, act like nothing happened */
>> -	msr	ttbr1_el1, swapper_ttb
>> -	isb
>> -	ret
>> +	.pushsection ".idmap.text", "a"
>> +SYM_TYPED_FUNC_START(bbml2_wait_for_repainting)
>> +	swapper_ttb	.req	x0
>> +	flag_ptr	.req	x1
>> +	mrs     swapper_ttb, ttbr1_el1
>> +	adr_l   flag_ptr, repaint_done
>> +	__idmap_cpu_set_reserved_ttbr1 x16, x17
>> +	wait_for_boot_cpu swapper_ttb, flag_ptr, w16, w17
>>   
>>   	.unreq	swapper_ttb
>>   	.unreq	flag_ptr
>> -SYM_FUNC_END(idmap_kpti_install_ng_mappings)
>> +SYM_FUNC_END(bbml2_wait_for_repainting)
>>   	.popsection
>> -#endif
> How about this instead?

Looks good to me.

Thanks,
Yang


Re: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
Posted by Ryan Roberts 2 months ago
Hi Yang,

On 24/07/2025 23:11, Yang Shi wrote:
> The kernel linear mapping is painted in very early stage of system boot.
> The cpufeature has not been finalized yet at this point.  So the linear
> mapping is determined by the capability of boot CPU.  If the boot CPU
> supports BBML2, large block mapping will be used for linear mapping.
> 
> But the secondary CPUs may not support BBML2, so repaint the linear mapping
> if large block mapping is used and the secondary CPUs don't support BBML2
> once cpufeature is finalized on all CPUs.
> 
> If the boot CPU doesn't support BBML2 or the secondary CPUs have the
> same BBML2 capability with the boot CPU, repainting the linear mapping
> is not needed.
> 
> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
> ---
>  arch/arm64/include/asm/mmu.h   |   6 +-
>  arch/arm64/kernel/cpufeature.c |   8 ++
>  arch/arm64/mm/mmu.c            | 173 +++++++++++++++++++++++++++------
>  arch/arm64/mm/pageattr.c       |   2 +-
>  arch/arm64/mm/proc.S           |  57 ++++++++---
>  5 files changed, 196 insertions(+), 50 deletions(-)

[...]

> +int __init linear_map_split_to_ptes(void *__unused)
> +{
> +	typedef void (repaint_wait_fn)(void);
> +	extern repaint_wait_fn bbml2_wait_for_repainting;
> +	repaint_wait_fn *wait_fn;
> +
> +	int cpu = smp_processor_id();
> +
> +	wait_fn = (void *)__pa_symbol(bbml2_wait_for_repainting);
> +
> +	/*
> +	 * Repainting just can be run on CPU 0 because we just can be sure
> +	 * CPU 0 supports BBML2.
> +	 */
> +	if (!cpu) {
> +		phys_addr_t kernel_start = __pa_symbol(_stext);
> +		phys_addr_t kernel_end = __pa_symbol(__init_begin);
> +		phys_addr_t start, end;
> +		unsigned long vstart, vend;
> +		int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> +		u64 i;
> +		int ret;
> +
> +		/*
> +		 * Wait for all secondary CPUs get prepared for repainting
> +		 * the linear mapping.
> +		 */
> +		smp_cond_load_acquire(&repaint_done, VAL == num_online_cpus());
> +
> +		memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
> +		/* Split the whole linear mapping */
> +		for_each_mem_range(i, &start, &end) {
> +			if (start >= end)
> +				return -EINVAL;
> +
> +			vstart = __phys_to_virt(start);
> +			vend = __phys_to_virt(end);
> +			ret = split_kernel_pgtable_mapping(vstart, vend, flags);

I've been thinking about this; I think the best approach is to use the pagewalk
API here, then you don't need to implement your own pgtable walker; you can just
implement the pud, pmd and pte callbacks to do the splitting and they can reuse
common split helper functions. This reduces code size quite a bit I think. And
also means that for split_kernel_pgtable_mapping() you can just pass a single
address and don't need to iterate over every entry.
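
Roughly this shape (just a sketch; split_pud()/split_pmd() stand in for
the common split helpers, and the exact walker entry point and locking
may differ by tree):

---8<---
static int split_pud_entry(pud_t *pudp, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	/* Only leaf (block) entries need splitting to a table of PMDs. */
	if (pud_leaf(pudp_get(pudp)))
		return split_pud(pudp, walk->private);
	return 0;
}

static int split_pmd_entry(pmd_t *pmdp, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	/* Split PMD blocks to a table of PTEs. */
	if (pmd_leaf(pmdp_get(pmdp)))
		return split_pmd(pmdp, walk->private);
	return 0;
}

static const struct mm_walk_ops split_to_ptes_ops = {
	.pud_entry	= split_pud_entry,
	.pmd_entry	= split_pmd_entry,
};

	/* then, for a given address or range: */
	ret = walk_page_range_novma(&init_mm, start, end,
				    &split_to_ptes_ops, NULL, &flags);
---8<---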

I started prototyping this to prove to myself that it is possible and ended up
with quite a clean implementation. I'm going to post that as a v6 RFC shortly -
I hope that's ok. I've retained you as primary author since it's all based on
your work. I'm hoping that the posting will speed up review so we can hopefully
get this into 6.18.

Thanks,
Ryan

Re: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
Posted by Yang Shi 2 months ago

On 8/5/25 12:54 AM, Ryan Roberts wrote:
> Hi Yang,
>
> On 24/07/2025 23:11, Yang Shi wrote:
>> The kernel linear mapping is painted in very early stage of system boot.
>> The cpufeature has not been finalized yet at this point.  So the linear
>> mapping is determined by the capability of boot CPU.  If the boot CPU
>> supports BBML2, large block mapping will be used for linear mapping.
>>
>> But the secondary CPUs may not support BBML2, so repaint the linear mapping
>> if large block mapping is used and the secondary CPUs don't support BBML2
>> once cpufeature is finalized on all CPUs.
>>
>> If the boot CPU doesn't support BBML2 or the secondary CPUs have the
>> same BBML2 capability with the boot CPU, repainting the linear mapping
>> is not needed.
>>
>> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
>> ---
>>   arch/arm64/include/asm/mmu.h   |   6 +-
>>   arch/arm64/kernel/cpufeature.c |   8 ++
>>   arch/arm64/mm/mmu.c            | 173 +++++++++++++++++++++++++++------
>>   arch/arm64/mm/pageattr.c       |   2 +-
>>   arch/arm64/mm/proc.S           |  57 ++++++++---
>>   5 files changed, 196 insertions(+), 50 deletions(-)
> [...]
>
>> +int __init linear_map_split_to_ptes(void *__unused)
>> +{
>> +	typedef void (repaint_wait_fn)(void);
>> +	extern repaint_wait_fn bbml2_wait_for_repainting;
>> +	repaint_wait_fn *wait_fn;
>> +
>> +	int cpu = smp_processor_id();
>> +
>> +	wait_fn = (void *)__pa_symbol(bbml2_wait_for_repainting);
>> +
>> +	/*
>> +	 * Repainting just can be run on CPU 0 because we just can be sure
>> +	 * CPU 0 supports BBML2.
>> +	 */
>> +	if (!cpu) {
>> +		phys_addr_t kernel_start = __pa_symbol(_stext);
>> +		phys_addr_t kernel_end = __pa_symbol(__init_begin);
>> +		phys_addr_t start, end;
>> +		unsigned long vstart, vend;
>> +		int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>> +		u64 i;
>> +		int ret;
>> +
>> +		/*
>> +		 * Wait for all secondary CPUs get prepared for repainting
>> +		 * the linear mapping.
>> +		 */
>> +		smp_cond_load_acquire(&repaint_done, VAL == num_online_cpus());
>> +
>> +		memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
>> +		/* Split the whole linear mapping */
>> +		for_each_mem_range(i, &start, &end) {
>> +			if (start >= end)
>> +				return -EINVAL;
>> +
>> +			vstart = __phys_to_virt(start);
>> +			vend = __phys_to_virt(end);
>> +			ret = split_kernel_pgtable_mapping(vstart, vend, flags);

Hi Ryan,

> I've been thinking about this; I think the best approach is to use the pagewalk
> API here, then you don't need to implement your own pgtable walker; you can just
> implement the pud, pmd and pte callbacks to do the splitting and they can reuse
> common split helper functions. This reduces code size quite a bit I think. And
> also means that for split_kernel_pgtable_mapping() you can just pass a single
> address and don't need to iterate over every entry.

Using the pgtable walker API is fine with me. The biggest concern was how
to reuse the split code for repainting, and I think this basically solves it.

>
> I started prototyping this to prove to myself that it is possible and ended up
> with quite a clean implementation. I'm going to post that as a v6 RFC shortly -
> I hope that's ok. I've retained you as primary author since it's all based on
> your work. I'm hoping that the posting will speed up review so we can hopefully
> get this into 6.18.

Thank you for making the prototype. I will take a look at it and reply in
that series directly.

Regards,
Yang

Re: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
Posted by kernel test robot 2 months, 1 week ago
Hi Yang,

kernel test robot noticed the following build warnings:

[auto build test WARNING on next-20250724]
[cannot apply to arm64/for-next/core akpm-mm/mm-everything v6.16-rc7 v6.16-rc6 v6.16-rc5 linus/master v6.16-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Yang-Shi/arm64-Enable-permission-change-on-arm64-kernel-block-mappings/20250725-061534
base:   next-20250724
patch link:    https://lore.kernel.org/r/20250724221216.1998696-5-yang%40os.amperecomputing.com
patch subject: [PATCH 4/4] arm64: mm: split linear mapping if BBML2 is not supported on secondary CPUs
config: arm64-randconfig-001-20250726 (https://download.01.org/0day-ci/archive/20250726/202507261822.ikaBRFsG-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 8.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250726/202507261822.ikaBRFsG-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507261822.ikaBRFsG-lkp@intel.com/

All warnings (new ones prefixed by >>):

   arch/arm64/mm/mmu.o: in function `linear_map_split_to_ptes':
   mmu.c:(.init.text+0x23c): relocation truncated to fit: R_AARCH64_LDST32_ABS_LO12_NC against symbol `repaint_done' defined in .data section in arch/arm64/mm/proc.o
>> aarch64-linux-ld: mmu.c:(.init.text+0x23c): warning: one possible cause of this error is that the symbol is being referenced in the indicated code as if it had a larger alignment than was declared where it was defined

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki