[PATCH 3/4] arm64: mm: support large block mapping when rodata=full

Yang Shi posted 4 patches 6 months, 2 weeks ago
There is a newer version of this series
[PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 6 months, 2 weeks ago
When rodata=full is specified, the kernel linear mapping has to be mapped
at PTE level since large block mappings can't be split later due to the
break-before-make rule on ARM64.

This results in several problems:
  - performance degradation
  - more TLB pressure
  - memory waste for kernel page table

With FEAT_BBM level 2 support, splitting a large block mapping into
smaller ones no longer requires making the page table entry invalid.  This
allows the kernel to split large block mappings on the fly.

Add kernel page table split support and use large block mappings by
default for rodata=full when FEAT_BBM level 2 is supported.  When
changing permissions on the kernel linear mapping, the page table will be
split to a smaller size as needed.

Machines without FEAT_BBM level 2 fall back to a PTE-mapped kernel linear
mapping when rodata=full.

With this we saw a significant performance boost in some benchmarks and
much lower memory consumption on my AmpereOne machine (192 cores, 1P) with
256GB memory.

* Memory use after boot
Before:
MemTotal:       258988984 kB
MemFree:        254821700 kB

After:
MemTotal:       259505132 kB
MemFree:        255410264 kB

Around 500MB more memory is free to use.  The larger the machine, the
more memory is saved.

* Memcached
We saw performance degradation when running the Memcached benchmark with
rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
With this patchset, ops/sec increased by around 3.5% and P99 latency
dropped by around 9.6%.
The gain mainly came from reduced kernel TLB misses; kernel TLB MPKI
dropped by 28.5%.

The benchmark data is now on par with rodata=on too.

* Disk encryption (dm-crypt) benchmark
Ran the fio benchmark with the command below on a 128G ramdisk (ext4) with
disk encryption (via dm-crypt).
fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
    --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
    --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
    --name=iops-test-job --eta-newline=1 --size 100G

The IOPS increased by 90% - 150% (the variance is high, but the worst
number from the good case is around 90% higher than the best number from
the bad case).  The bandwidth increased and the avg clat dropped
proportionally.

* Sequential file read
Read a 100G file sequentially on XFS (xfs_io read with the page cache
populated).  The bandwidth increased by 150%.

Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
---
 arch/arm64/include/asm/cpufeature.h |  26 +++
 arch/arm64/include/asm/mmu.h        |   1 +
 arch/arm64/include/asm/pgtable.h    |  12 +-
 arch/arm64/kernel/cpufeature.c      |   2 +-
 arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
 arch/arm64/mm/pageattr.c            |  37 +++-
 6 files changed, 319 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8f36ffa16b73..a95806980298 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
 #endif
 }
 
+bool cpu_has_bbml2_noabort(unsigned int cpu_midr);
+
+static inline bool has_nobbml2_override(void)
+{
+	u64 mmfr2;
+	unsigned int bbm;
+
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	mmfr2 &= ~id_aa64mmfr2_override.mask;
+	mmfr2 |= id_aa64mmfr2_override.val;
+	bbm = cpuid_feature_extract_unsigned_field(mmfr2,
+						   ID_AA64MMFR2_EL1_BBM_SHIFT);
+	return bbm == 0;
+}
+
+/*
+ * Called at early boot stage on boot CPU before cpu info and cpu feature
+ * are ready.
+ */
+static inline bool bbml2_noabort_available(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
+	       cpu_has_bbml2_noabort(read_cpuid_id()) &&
+	       !has_nobbml2_override();
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 6e8aa8e72601..2693d63bf837 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -71,6 +71,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern int split_linear_mapping(unsigned long start, unsigned long end);
 
 /*
  * This check is triggered during the early boot before the cpufeature
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d3b538be1500..bf3cef31d243 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,6 +293,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
 }
 
+static inline pmd_t pmd_mknoncont(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
+}
+
 static inline pte_t pte_mkdevmap(pte_t pte)
 {
 	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
@@ -769,7 +774,7 @@ static inline bool in_swapper_pgdir(void *addr)
 	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
 }
 
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+static inline void __set_pmd_nosync(pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef __PAGETABLE_PMD_FOLDED
 	if (in_swapper_pgdir(pmdp)) {
@@ -779,6 +784,11 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 	WRITE_ONCE(*pmdp, pmd);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	__set_pmd_nosync(pmdp, pmd);
 
 	if (pmd_valid(pmd)) {
 		dsb(ishst);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e879bfcf853b..5fc2a4a804de 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2209,7 +2209,7 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
 	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
 }
 
-static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
+bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
 {
 	/*
 	 * We want to allow usage of bbml2 in as wide a range of kernel contexts
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 775c0536b194..4c5d3aa35d62 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
+#define SPLIT_MAPPINGS		BIT(3)
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
@@ -166,12 +167,91 @@ static void init_clear_pgtable(void *table)
 	dsb(ishst);
 }
 
+static void split_cont_pte(pte_t *ptep)
+{
+	pte_t *_ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
+	pte_t _pte;
+
+	for (int i = 0; i < CONT_PTES; i++, _ptep++) {
+		_pte = READ_ONCE(*_ptep);
+		_pte = pte_mknoncont(_pte);
+		__set_pte_nosync(_ptep, _pte);
+	}
+
+	dsb(ishst);
+	isb();
+}
+
+static void split_cont_pmd(pmd_t *pmdp)
+{
+	pmd_t *_pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS);
+	pmd_t _pmd;
+
+	for (int i = 0; i < CONT_PMDS; i++, _pmdp++) {
+		_pmd = READ_ONCE(*_pmdp);
+		_pmd = pmd_mknoncont(_pmd);
+		set_pmd(_pmdp, _pmd);
+	}
+}
+
+static void split_pmd(pmd_t pmd, phys_addr_t pte_phys, int flags)
+{
+	pte_t *ptep;
+	unsigned long pfn;
+	pgprot_t prot;
+
+	pfn = pmd_pfn(pmd);
+	prot = pmd_pgprot(pmd);
+	prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PTE_TYPE_PAGE);
+
+	ptep = (pte_t *)phys_to_virt(pte_phys);
+
+	/* It must be naturally aligned if PMD is leaf */
+	if ((flags & NO_CONT_MAPPINGS) == 0)
+		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+	for (int i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
+		__set_pte_nosync(ptep, pfn_pte(pfn, prot));
+
+	dsb(ishst);
+}
+
+static void split_pud(pud_t pud, phys_addr_t pmd_phys, int flags)
+{
+	pmd_t *pmdp;
+	unsigned long pfn;
+	pgprot_t prot;
+	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
+
+	pfn = pud_pfn(pud);
+	prot = pud_pgprot(pud);
+	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
+
+	/* It must be naturally aligned if PUD is leaf */
+	if ((flags & NO_CONT_MAPPINGS) == 0)
+		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+	for (int i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+		__set_pmd_nosync(pmdp, pfn_pmd(pfn, prot));
+		pfn += step;
+	}
+
+	dsb(ishst);
+}
+
 static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot)
+		     phys_addr_t phys, pgprot_t prot, int flags)
 {
 	do {
 		pte_t old_pte = __ptep_get(ptep);
 
+		if (flags & SPLIT_MAPPINGS) {
+			if (pte_cont(old_pte))
+				split_cont_pte(ptep);
+
+			continue;
+		}
+
 		/*
 		 * Required barriers to make this visible to the table walker
 		 * are deferred to the end of alloc_init_cont_pte().
@@ -199,11 +279,20 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 	pmd_t pmd = READ_ONCE(*pmdp);
 	pte_t *ptep;
 	int ret = 0;
+	bool split = flags & SPLIT_MAPPINGS;
+	pmdval_t pmdval;
+	phys_addr_t pte_phys;
 
-	BUG_ON(pmd_sect(pmd));
-	if (pmd_none(pmd)) {
-		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
-		phys_addr_t pte_phys;
+	if (!split)
+		BUG_ON(pmd_sect(pmd));
+
+	if (pmd_none(pmd) && split) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (pmd_none(pmd) || (split && pmd_leaf(pmd))) {
+		pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
 
 		if (flags & NO_EXEC_MAPPINGS)
 			pmdval |= PMD_TABLE_PXN;
@@ -213,6 +302,18 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 			ret = -ENOMEM;
 			goto out;
 		}
+	}
+
+	if (split) {
+		if (pmd_leaf(pmd)) {
+			split_pmd(pmd, pte_phys, flags);
+			__pmd_populate(pmdp, pte_phys, pmdval);
+		}
+		ptep = pte_offset_kernel(pmdp, addr);
+		goto split_pgtable;
+	}
+
+	if (pmd_none(pmd)) {
 		ptep = pte_set_fixmap(pte_phys);
 		init_clear_pgtable(ptep);
 		ptep += pte_index(addr);
@@ -222,17 +323,28 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 		ptep = pte_set_fixmap_offset(pmdp, addr);
 	}
 
+split_pgtable:
 	do {
 		pgprot_t __prot = prot;
 
 		next = pte_cont_addr_end(addr, end);
 
+		if (split) {
+			pte_t pteval = READ_ONCE(*ptep);
+			bool cont = pte_cont(pteval);
+
+			if (cont &&
+			    ((addr | next) & ~CONT_PTE_MASK) == 0 &&
+			    (flags & NO_CONT_MAPPINGS) == 0)
+				continue;
+		}
+
 		/* use a contiguous mapping if the range is suitably aligned */
 		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
 		    (flags & NO_CONT_MAPPINGS) == 0)
 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pte(ptep, addr, next, phys, __prot);
+		init_pte(ptep, addr, next, phys, __prot, flags);
 
 		ptep += pte_index(next) - pte_index(addr);
 		phys += next - addr;
@@ -243,7 +355,8 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 	 * ensure that all previous pgtable writes are visible to the table
 	 * walker.
 	 */
-	pte_clear_fixmap();
+	if (!split)
+		pte_clear_fixmap();
 
 out:
 	return ret;
@@ -255,15 +368,29 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
 {
 	unsigned long next;
 	int ret = 0;
+	bool split = flags & SPLIT_MAPPINGS;
+	bool cont;
 
 	do {
 		pmd_t old_pmd = READ_ONCE(*pmdp);
 
 		next = pmd_addr_end(addr, end);
 
+		if (split && pmd_leaf(old_pmd)) {
+			cont = pgprot_val(pmd_pgprot(old_pmd)) & PTE_CONT;
+			if (cont)
+				split_cont_pmd(pmdp);
+
+			/* The PMD is fully contained in the range */
+			if (((addr | next) & ~PMD_MASK) == 0 &&
+			    (flags & NO_BLOCK_MAPPINGS) == 0)
+				continue;
+		}
+
 		/* try section mapping first */
 		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
+		    (flags & SPLIT_MAPPINGS) == 0) {
 			pmd_set_huge(pmdp, phys, prot);
 
 			/*
@@ -278,7 +405,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			if (ret)
 				break;
 
-			BUG_ON(pmd_val(old_pmd) != 0 &&
+			BUG_ON(!split && pmd_val(old_pmd) != 0 &&
 			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
 		}
 		phys += next - addr;
@@ -296,14 +423,23 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 	int ret = 0;
 	pud_t pud = READ_ONCE(*pudp);
 	pmd_t *pmdp;
+	bool split = flags & SPLIT_MAPPINGS;
+	pudval_t pudval;
+	phys_addr_t pmd_phys;
 
 	/*
 	 * Check for initial section mappings in the pgd/pud.
 	 */
-	BUG_ON(pud_sect(pud));
-	if (pud_none(pud)) {
-		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
-		phys_addr_t pmd_phys;
+	if (!split)
+		BUG_ON(pud_sect(pud));
+
+	if (pud_none(pud) && split) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (pud_none(pud) || (split && pud_leaf(pud))) {
+		pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
 
 		if (flags & NO_EXEC_MAPPINGS)
 			pudval |= PUD_TABLE_PXN;
@@ -313,6 +449,18 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 			ret = -ENOMEM;
 			goto out;
 		}
+	}
+
+	if (split) {
+		if (pud_leaf(pud)) {
+			split_pud(pud, pmd_phys, flags);
+			__pud_populate(pudp, pmd_phys, pudval);
+		}
+		pmdp = pmd_offset(pudp, addr);
+		goto split_pgtable;
+	}
+
+	if (pud_none(pud)) {
 		pmdp = pmd_set_fixmap(pmd_phys);
 		init_clear_pgtable(pmdp);
 		pmdp += pmd_index(addr);
@@ -322,11 +470,22 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 		pmdp = pmd_set_fixmap_offset(pudp, addr);
 	}
 
+split_pgtable:
 	do {
 		pgprot_t __prot = prot;
 
 		next = pmd_cont_addr_end(addr, end);
 
+		if (split) {
+			pmd_t pmdval = READ_ONCE(*pmdp);
+			bool cont = pgprot_val(pmd_pgprot(pmdval)) & PTE_CONT;
+
+			if (cont &&
+			    ((addr | next) & ~CONT_PMD_MASK) == 0 &&
+			    (flags & NO_CONT_MAPPINGS) == 0)
+				continue;
+		}
+
 		/* use a contiguous mapping if the range is suitably aligned */
 		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
 		    (flags & NO_CONT_MAPPINGS) == 0)
@@ -340,7 +499,8 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 		phys += next - addr;
 	} while (addr = next, addr != end);
 
-	pmd_clear_fixmap();
+	if (!split)
+		pmd_clear_fixmap();
 
 out:
 	return ret;
@@ -355,6 +515,16 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
 	int ret = 0;
 	p4d_t p4d = READ_ONCE(*p4dp);
 	pud_t *pudp;
+	bool split = flags & SPLIT_MAPPINGS;
+
+	if (split) {
+		if (p4d_none(p4d)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		pudp = pud_offset(p4dp, addr);
+		goto split_pgtable;
+	}
 
 	if (p4d_none(p4d)) {
 		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF;
@@ -377,17 +547,26 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
 		pudp = pud_set_fixmap_offset(p4dp, addr);
 	}
 
+split_pgtable:
 	do {
 		pud_t old_pud = READ_ONCE(*pudp);
 
 		next = pud_addr_end(addr, end);
 
+		if (split && pud_leaf(old_pud)) {
+			/* The PUD is fully contained in the range */
+			if (((addr | next) & ~PUD_MASK) == 0 &&
+			    (flags & NO_BLOCK_MAPPINGS) == 0)
+				continue;
+		}
+
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
 		if (pud_sect_supported() &&
 		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
+		    (flags & SPLIT_MAPPINGS) == 0) {
 			pud_set_huge(pudp, phys, prot);
 
 			/*
@@ -402,13 +581,14 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
 			if (ret)
 				break;
 
-			BUG_ON(pud_val(old_pud) != 0 &&
+			BUG_ON(!split && pud_val(old_pud) != 0 &&
 			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
 		}
 		phys += next - addr;
 	} while (pudp++, addr = next, addr != end);
 
-	pud_clear_fixmap();
+	if (!split)
+		pud_clear_fixmap();
 
 out:
 	return ret;
@@ -423,6 +603,16 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
 	int ret = 0;
 	pgd_t pgd = READ_ONCE(*pgdp);
 	p4d_t *p4dp;
+	bool split = flags & SPLIT_MAPPINGS;
+
+	if (split) {
+		if (pgd_none(pgd)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		p4dp = p4d_offset(pgdp, addr);
+		goto split_pgtable;
+	}
 
 	if (pgd_none(pgd)) {
 		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF;
@@ -445,6 +635,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
 		p4dp = p4d_set_fixmap_offset(pgdp, addr);
 	}
 
+split_pgtable:
 	do {
 		p4d_t old_p4d = READ_ONCE(*p4dp);
 
@@ -461,7 +652,8 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
 		phys += next - addr;
 	} while (p4dp++, addr = next, addr != end);
 
-	p4d_clear_fixmap();
+	if (!split)
+		p4d_clear_fixmap();
 
 out:
 	return ret;
@@ -557,6 +749,25 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 	return pa;
 }
 
+int split_linear_mapping(unsigned long start, unsigned long end)
+{
+	int ret = 0;
+
+	if (!system_supports_bbml2_noabort())
+		return 0;
+
+	mmap_write_lock(&init_mm);
+	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
+	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
+					  start, (end - start), __pgprot(0),
+					  __pgd_pgtable_alloc,
+					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
+	mmap_write_unlock(&init_mm);
+	flush_tlb_kernel_range(start, end);
+
+	return ret;
+}
+
 /*
  * This function can only be used to modify existing table entries,
  * without allocating new levels of table. Note that this permits the
@@ -676,6 +887,24 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) {
 
 #endif /* CONFIG_KFENCE */
 
+static inline bool force_pte_mapping(void)
+{
+	/*
+	 * Can't use the cpufeature API to determine whether BBML2 is
+	 * supported or not since cpufeatures have not been finalized yet.
+	 *
+	 * Check the boot CPU only for now.  If the boot CPU has BBML2,
+	 * paint the linear mapping with block mappings.  If it turns out
+	 * the secondary CPUs don't support BBML2 once cpufeatures are
+	 * finalized, the linear mapping will be repainted with PTE
+	 * mappings.
+	 */
+	return (rodata_full && !bbml2_noabort_available()) ||
+		debug_pagealloc_enabled() ||
+		arm64_kfence_can_set_direct_map() ||
+		is_realm_world();
+}
+
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -701,7 +930,7 @@ static void __init map_mem(pgd_t *pgdp)
 
 	early_kfence_pool = arm64_kfence_alloc_pool();
 
-	if (can_set_direct_map())
+	if (force_pte_mapping())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1402,7 +1631,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 
 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
-	if (can_set_direct_map())
+	if (force_pte_mapping())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 39fd1f7ff02a..25c068712cb5 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -10,6 +10,7 @@
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
+#include <asm/mmu.h>
 #include <asm/pgtable-prot.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
@@ -42,6 +43,8 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 	struct page_change_data *cdata = data;
 	pte_t pte = __ptep_get(ptep);
 
+	BUG_ON(pte_cont(pte));
+
 	pte = clear_pte_bit(pte, cdata->clear_mask);
 	pte = set_pte_bit(pte, cdata->set_mask);
 
@@ -80,8 +83,9 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long start = addr;
 	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
+	unsigned long l_start;
 	struct vm_struct *area;
-	int i;
+	int i, ret;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -118,7 +122,12 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
 		for (i = 0; i < area->nr_pages; i++) {
-			__change_memory_common((u64)page_address(area->pages[i]),
+			l_start = (u64)page_address(area->pages[i]);
+			ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
+			if (WARN_ON_ONCE(ret))
+				return ret;
+
+			__change_memory_common(l_start,
 					       PAGE_SIZE, set_mask, clear_mask);
 		}
 	}
@@ -174,6 +183,9 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
 
 int set_direct_map_invalid_noflush(struct page *page)
 {
+	unsigned long l_start;
+	int ret;
+
 	struct page_change_data data = {
 		.set_mask = __pgprot(0),
 		.clear_mask = __pgprot(PTE_VALID),
@@ -182,13 +194,21 @@ int set_direct_map_invalid_noflush(struct page *page)
 	if (!can_set_direct_map())
 		return 0;
 
+	l_start = (unsigned long)page_address(page);
+	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
+	if (WARN_ON_ONCE(ret))
+		return ret;
+
 	return apply_to_page_range(&init_mm,
-				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   l_start, PAGE_SIZE, change_page_range,
+				   &data);
 }
 
 int set_direct_map_default_noflush(struct page *page)
 {
+	unsigned long l_start;
+	int ret;
+
 	struct page_change_data data = {
 		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
 		.clear_mask = __pgprot(PTE_RDONLY),
@@ -197,9 +217,14 @@ int set_direct_map_default_noflush(struct page *page)
 	if (!can_set_direct_map())
 		return 0;
 
+	l_start = (unsigned long)page_address(page);
+	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
+	if (WARN_ON_ONCE(ret))
+		return ret;
+
 	return apply_to_page_range(&init_mm,
-				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   l_start, PAGE_SIZE, change_page_range,
+				   &data);
 }
 
 static int __set_memory_enc_dec(unsigned long addr,
-- 
2.48.1
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Ryan Roberts 6 months ago
On 31/05/2025 03:41, Yang Shi wrote:
> When rodata=full is specified, kernel linear mapping has to be mapped at
> PTE level since large page table can't be split due to break-before-make
> rule on ARM64.
> 
> This resulted in a couple of problems:
>   - performance degradation
>   - more TLB pressure
>   - memory waste for kernel page table
> 
> With FEAT_BBM level 2 support, splitting large block page table to
> smaller ones doesn't need to make the page table entry invalid anymore.
> This allows kernel split large block mapping on the fly.
> 
> Add kernel page table split support and use large block mapping by
> default when FEAT_BBM level 2 is supported for rodata=full.  When
> changing permissions for kernel linear mapping, the page table will be
> split to smaller size.
> 
> The machine without FEAT_BBM level 2 will fallback to have kernel linear
> mapping PTE-mapped when rodata=full.
> 
> With this we saw significant performance boost with some benchmarks and
> much less memory consumption on my AmpereOne machine (192 cores, 1P) with
> 256GB memory.
> 
> * Memory use after boot
> Before:
> MemTotal:       258988984 kB
> MemFree:        254821700 kB
> 
> After:
> MemTotal:       259505132 kB
> MemFree:        255410264 kB
> 
> Around 500MB more memory are free to use.  The larger the machine, the
> more memory saved.
> 
> * Memcached
> We saw performance degradation when running Memcached benchmark with
> rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
> With this patchset we saw ops/sec is increased by around 3.5%, P99
> latency is reduced by around 9.6%.
> The gain mainly came from reduced kernel TLB misses.  The kernel TLB
> MPKI is reduced by 28.5%.
> 
> The benchmark data is now on par with rodata=on too.
> 
> * Disk encryption (dm-crypt) benchmark
> Ran fio benchmark with the below command on a 128G ramdisk (ext4) with disk
> encryption (by dm-crypt).
> fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
>     --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
>     --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
>     --name=iops-test-job --eta-newline=1 --size 100G
> 
> The IOPS is increased by 90% - 150% (the variance is high, but the worst
> number of good case is around 90% more than the best number of bad case).
> The bandwidth is increased and the avg clat is reduced proportionally.
> 
> * Sequential file read
> Read 100G file sequentially on XFS (xfs_io read with page cache populated).
> The bandwidth is increased by 150%.
> 
> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
> ---
>  arch/arm64/include/asm/cpufeature.h |  26 +++
>  arch/arm64/include/asm/mmu.h        |   1 +
>  arch/arm64/include/asm/pgtable.h    |  12 +-
>  arch/arm64/kernel/cpufeature.c      |   2 +-
>  arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
>  arch/arm64/mm/pageattr.c            |  37 +++-
>  6 files changed, 319 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index 8f36ffa16b73..a95806980298 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
>  #endif
>  }
>  

[...] (I gave comments on this part in a previous reply)

I'm focussing on the table walker in mmu.c here - i.e. the implementation of
split_linear_mapping()...

> index 775c0536b194..4c5d3aa35d62 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -45,6 +45,7 @@
>  #define NO_BLOCK_MAPPINGS	BIT(0)
>  #define NO_CONT_MAPPINGS	BIT(1)
>  #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
> +#define SPLIT_MAPPINGS		BIT(3)
>  
>  u64 kimage_voffset __ro_after_init;
>  EXPORT_SYMBOL(kimage_voffset);
> @@ -166,12 +167,91 @@ static void init_clear_pgtable(void *table)
>  	dsb(ishst);
>  }
>  
> +static void split_cont_pte(pte_t *ptep)
> +{
> +	pte_t *_ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
> +	pte_t _pte;
> +
> +	for (int i = 0; i < CONT_PTES; i++, _ptep++) {
> +		_pte = READ_ONCE(*_ptep);
> +		_pte = pte_mknoncont(_pte);
> +		__set_pte_nosync(_ptep, _pte);

This is not atomic but I don't think that matters for kernel mappings since we
don't care about HW-modified access/dirty bits.

> +	}
> +
> +	dsb(ishst);
> +	isb();

I think we can use lazy_mmu_mode here to potentially batch the barriers for
multiple levels. This also avoids the need for adding __set_pmd_nosync().
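
E.g. a rough sketch (assuming arm64's lazy_mmu implementation defers the
dsb/isb until arch_leave_lazy_mmu_mode(); the helper name here is made up):

	static void split_cont_pte_lazy(pte_t *ptep)
	{
		pte_t *p = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
		int i;

		arch_enter_lazy_mmu_mode();
		for (i = 0; i < CONT_PTES; i++, p++)
			__set_pte(p, pte_mknoncont(__ptep_get(p)));
		arch_leave_lazy_mmu_mode();	/* barriers issued once here */
	}

That way the regular setters can be used at every level and the barriers are
only paid for once per split.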

> +}
> +
> +static void split_cont_pmd(pmd_t *pmdp)
> +{
> +	pmd_t *_pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS);
> +	pmd_t _pmd;
> +
> +	for (int i = 0; i < CONT_PMDS; i++, _pmdp++) {
> +		_pmd = READ_ONCE(*_pmdp);
> +		_pmd = pmd_mknoncont(_pmd);
> +		set_pmd(_pmdp, _pmd);

Without lazy_mmu_mode this is issuing barriers per entry. With lazy_mmu_mode
this will defer the barriers until we exit the mode so this will get a bit
faster. (In practice it will be a bit like what you have done for contpte but
potentially even better because we can batch across levels.)

> +	}
> +}
> +
> +static void split_pmd(pmd_t pmd, phys_addr_t pte_phys, int flags)
> +{
> +	pte_t *ptep;
> +	unsigned long pfn;
> +	pgprot_t prot;
> +
> +	pfn = pmd_pfn(pmd);
> +	prot = pmd_pgprot(pmd);
> +	prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PTE_TYPE_PAGE);
> +
> +	ptep = (pte_t *)phys_to_virt(pte_phys);
> +
> +	/* It must be naturally aligned if PMD is leaf */
> +	if ((flags & NO_CONT_MAPPINGS) == 0)

I'm not sure we have a use case for avoiding CONT mappings? Suggest doing it
unconditionally.

> +		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
> +
> +	for (int i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
> +		__set_pte_nosync(ptep, pfn_pte(pfn, prot));
> +
> +	dsb(ishst);
> +}
> +
> +static void split_pud(pud_t pud, phys_addr_t pmd_phys, int flags)
> +{
> +	pmd_t *pmdp;
> +	unsigned long pfn;
> +	pgprot_t prot;
> +	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
> +
> +	pfn = pud_pfn(pud);
> +	prot = pud_pgprot(pud);
> +	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
> +
> +	/* It must be naturally aligned if PUD is leaf */
> +	if ((flags & NO_CONT_MAPPINGS) == 0)
> +		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
> +
> +	for (int i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
> +		__set_pmd_nosync(pmdp, pfn_pmd(pfn, prot));
> +		pfn += step;
> +	}
> +
> +	dsb(ishst);
> +}
> +
>  static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
> -		     phys_addr_t phys, pgprot_t prot)
> +		     phys_addr_t phys, pgprot_t prot, int flags)
>  {
>  	do {
>  		pte_t old_pte = __ptep_get(ptep);
>  
> +		if (flags & SPLIT_MAPPINGS) {
> +			if (pte_cont(old_pte))
> +				split_cont_pte(ptep);
> +
> +			continue;
> +		}
> +
>  		/*
>  		 * Required barriers to make this visible to the table walker
>  		 * are deferred to the end of alloc_init_cont_pte().
> @@ -199,11 +279,20 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  	pmd_t pmd = READ_ONCE(*pmdp);
>  	pte_t *ptep;
>  	int ret = 0;
> +	bool split = flags & SPLIT_MAPPINGS;
> +	pmdval_t pmdval;
> +	phys_addr_t pte_phys;
>  
> -	BUG_ON(pmd_sect(pmd));
> -	if (pmd_none(pmd)) {
> -		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
> -		phys_addr_t pte_phys;
> +	if (!split)
> +		BUG_ON(pmd_sect(pmd));
> +
> +	if (pmd_none(pmd) && split) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (pmd_none(pmd) || (split && pmd_leaf(pmd))) {
> +		pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
>  
>  		if (flags & NO_EXEC_MAPPINGS)
>  			pmdval |= PMD_TABLE_PXN;
> @@ -213,6 +302,18 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  			ret = -ENOMEM;
>  			goto out;
>  		}
> +	}
> +
> +	if (split) {
> +		if (pmd_leaf(pmd)) {
> +			split_pmd(pmd, pte_phys, flags);
> +			__pmd_populate(pmdp, pte_phys, pmdval);
> +		}
> +		ptep = pte_offset_kernel(pmdp, addr);
> +		goto split_pgtable;
> +	}
> +
> +	if (pmd_none(pmd)) {
>  		ptep = pte_set_fixmap(pte_phys);
>  		init_clear_pgtable(ptep);
>  		ptep += pte_index(addr);
> @@ -222,17 +323,28 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  		ptep = pte_set_fixmap_offset(pmdp, addr);
>  	}
>  
> +split_pgtable:
>  	do {
>  		pgprot_t __prot = prot;
>  
>  		next = pte_cont_addr_end(addr, end);
>  
> +		if (split) {
> +			pte_t pteval = READ_ONCE(*ptep);
> +			bool cont = pte_cont(pteval);
> +
> +			if (cont &&
> +			    ((addr | next) & ~CONT_PTE_MASK) == 0 &&
> +			    (flags & NO_CONT_MAPPINGS) == 0)
> +				continue;
> +		}
> +
>  		/* use a contiguous mapping if the range is suitably aligned */
>  		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
>  		    (flags & NO_CONT_MAPPINGS) == 0)
>  			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>  
> -		init_pte(ptep, addr, next, phys, __prot);
> +		init_pte(ptep, addr, next, phys, __prot, flags);
>  
>  		ptep += pte_index(next) - pte_index(addr);
>  		phys += next - addr;
> @@ -243,7 +355,8 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  	 * ensure that all previous pgtable writes are visible to the table
>  	 * walker.
>  	 */
> -	pte_clear_fixmap();
> +	if (!split)
> +		pte_clear_fixmap();
>  
>  out:
>  	return ret;
> @@ -255,15 +368,29 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>  {
>  	unsigned long next;
>  	int ret = 0;
> +	bool split = flags & SPLIT_MAPPINGS;
> +	bool cont;
>  
>  	do {
>  		pmd_t old_pmd = READ_ONCE(*pmdp);
>  
>  		next = pmd_addr_end(addr, end);
>  
> +		if (split && pmd_leaf(old_pmd)) {
> +			cont = pgprot_val(pmd_pgprot(old_pmd)) & PTE_CONT;
> +			if (cont)
> +				split_cont_pmd(pmdp);
> +
> +			/* The PMD is fully contained in the range */
> +			if (((addr | next) & ~PMD_MASK) == 0 &&
> +			    (flags & NO_BLOCK_MAPPINGS) == 0)
> +				continue;
> +		}
> +
>  		/* try section mapping first */
>  		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
> -		    (flags & NO_BLOCK_MAPPINGS) == 0) {
> +		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
> +		    (flags & SPLIT_MAPPINGS) == 0) {
>  			pmd_set_huge(pmdp, phys, prot);
>  
>  			/*
> @@ -278,7 +405,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>  			if (ret)
>  				break;
>  
> -			BUG_ON(pmd_val(old_pmd) != 0 &&
> +			BUG_ON(!split && pmd_val(old_pmd) != 0 &&
>  			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
>  		}
>  		phys += next - addr;
> @@ -296,14 +423,23 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  	int ret = 0;
>  	pud_t pud = READ_ONCE(*pudp);
>  	pmd_t *pmdp;
> +	bool split = flags & SPLIT_MAPPINGS;
> +	pudval_t pudval;
> +	phys_addr_t pmd_phys;
>  
>  	/*
>  	 * Check for initial section mappings in the pgd/pud.
>  	 */
> -	BUG_ON(pud_sect(pud));
> -	if (pud_none(pud)) {
> -		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
> -		phys_addr_t pmd_phys;
> +	if (!split)
> +		BUG_ON(pud_sect(pud));
> +
> +	if (pud_none(pud) && split) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (pud_none(pud) || (split && pud_leaf(pud))) {
> +		pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
>  
>  		if (flags & NO_EXEC_MAPPINGS)
>  			pudval |= PUD_TABLE_PXN;
> @@ -313,6 +449,18 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  			ret = -ENOMEM;
>  			goto out;
>  		}
> +	}
> +
> +	if (split) {
> +		if (pud_leaf(pud)) {
> +			split_pud(pud, pmd_phys, flags);
> +			__pud_populate(pudp, pmd_phys, pudval);
> +		}
> +		pmdp = pmd_offset(pudp, addr);
> +		goto split_pgtable;
> +	}
> +
> +	if (pud_none(pud)) {
>  		pmdp = pmd_set_fixmap(pmd_phys);
>  		init_clear_pgtable(pmdp);
>  		pmdp += pmd_index(addr);
> @@ -322,11 +470,22 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  		pmdp = pmd_set_fixmap_offset(pudp, addr);
>  	}
>  
> +split_pgtable:
>  	do {
>  		pgprot_t __prot = prot;
>  
>  		next = pmd_cont_addr_end(addr, end);
>  
> +		if (split) {
> +			pmd_t pmdval = READ_ONCE(*pmdp);
> +			bool cont = pgprot_val(pmd_pgprot(pmdval)) & PTE_CONT;
> +
> +			if (cont &&
> +			    ((addr | next) & ~CONT_PMD_MASK) == 0 &&
> +			    (flags & NO_CONT_MAPPINGS) == 0)
> +				continue;
> +		}
> +
>  		/* use a contiguous mapping if the range is suitably aligned */
>  		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
>  		    (flags & NO_CONT_MAPPINGS) == 0)
> @@ -340,7 +499,8 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>  		phys += next - addr;
>  	} while (addr = next, addr != end);
>  
> -	pmd_clear_fixmap();
> +	if (!split)
> +		pmd_clear_fixmap();
>  
>  out:
>  	return ret;
> @@ -355,6 +515,16 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>  	int ret = 0;
>  	p4d_t p4d = READ_ONCE(*p4dp);
>  	pud_t *pudp;
> +	bool split = flags & SPLIT_MAPPINGS;
> +
> +	if (split) {
> +		if (p4d_none(p4d)) {
> +			ret= -EINVAL;
> +			goto out;
> +		}
> +		pudp = pud_offset(p4dp, addr);
> +		goto split_pgtable;
> +	}
>  
>  	if (p4d_none(p4d)) {
>  		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF;
> @@ -377,17 +547,26 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>  		pudp = pud_set_fixmap_offset(p4dp, addr);
>  	}
>  
> +split_pgtable:
>  	do {
>  		pud_t old_pud = READ_ONCE(*pudp);
>  
>  		next = pud_addr_end(addr, end);
>  
> +		if (split && pud_leaf(old_pud)) {
> +			/* The PUD is fully contained in the range */
> +			if (((addr | next) & ~PUD_MASK) == 0 &&
> +			    (flags & NO_BLOCK_MAPPINGS) == 0)
> +				continue;
> +		}
> +
>  		/*
>  		 * For 4K granule only, attempt to put down a 1GB block
>  		 */
>  		if (pud_sect_supported() &&
>  		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
> -		    (flags & NO_BLOCK_MAPPINGS) == 0) {
> +		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
> +		    (flags & SPLIT_MAPPINGS) == 0) {
>  			pud_set_huge(pudp, phys, prot);
>  
>  			/*
> @@ -402,13 +581,14 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>  			if (ret)
>  				break;
>  
> -			BUG_ON(pud_val(old_pud) != 0 &&
> +			BUG_ON(!split && pud_val(old_pud) != 0 &&
>  			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
>  		}
>  		phys += next - addr;
>  	} while (pudp++, addr = next, addr != end);
>  
> -	pud_clear_fixmap();
> +	if (!split)
> +		pud_clear_fixmap();
>  
>  out:
>  	return ret;
> @@ -423,6 +603,16 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  	int ret = 0;
>  	pgd_t pgd = READ_ONCE(*pgdp);
>  	p4d_t *p4dp;
> +	bool split = flags & SPLIT_MAPPINGS;
> +
> +	if (split) {
> +		if (pgd_none(pgd)) {
> +			ret = -EINVAL;
> +			goto out;
> +		}
> +		p4dp = p4d_offset(pgdp, addr);
> +		goto split_pgtable;
> +	}

I really don't like the way the split logic has been added to the existing table
walker; there are so many conditionals, it's not clear that there is really any
advantage. I know I proposed it originally, but I changed my mind last cycle and
made the case for keeping it separate. That's still my opinion, I'm afraid; I'm
proposing a patch below showing how I would prefer to see this implemented.

>  
>  	if (pgd_none(pgd)) {
>  		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF;
> @@ -445,6 +635,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  		p4dp = p4d_set_fixmap_offset(pgdp, addr);
>  	}
>  
> +split_pgtable:
>  	do {
>  		p4d_t old_p4d = READ_ONCE(*p4dp);
>  
> @@ -461,7 +652,8 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  		phys += next - addr;
>  	} while (p4dp++, addr = next, addr != end);
>  
> -	p4d_clear_fixmap();
> +	if (!split)
> +		p4d_clear_fixmap();
>  
>  out:
>  	return ret;
> @@ -557,6 +749,25 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
>  	return pa;
>  }
>  
> +int split_linear_mapping(unsigned long start, unsigned long end)
> +{
> +	int ret = 0;
> +
> +	if (!system_supports_bbml2_noabort())
> +		return 0;
> +
> +	mmap_write_lock(&init_mm);
> +	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
> +	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
> +					  start, (end - start), __pgprot(0),
> +					  __pgd_pgtable_alloc,
> +					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);

Implementing this on top of __create_pgd_mapping_locked() is problematic because
(I think) it assumes that the virtual range is physically contiguous? That's
fine for the linear map, but I'd like to reuse this primitive for vmalloc too.

> +	mmap_write_unlock(&init_mm);

As already mentioned, I don't like this locking. I think we can make it work
locklessly as long as we are only ever splitting and not collapsing.

> +	flush_tlb_kernel_range(start, end);
> +
> +	return ret;
> +}

I had a go at creating a version to try to illustrate how I have been thinking
about this. What do you think? I've only compile tested it (and it fails because
I don't have pmd_mknoncont() and system_supports_bbml2_noabort() in my tree -
but the rest looks ok). It's on top of v6.16-rc1, where the pgtable allocation
functions have changed a bit. And I don't think you need patch 2 from your
series with this change. I haven't implemented the cmpxchg part that I think
would make it safe to be used locklessly yet, but I've marked the sites up with
TODO. Once implemented, the idea is that concurrent threads trying to split on
addresses that all lie within the same block/contig mapping should be safe.
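
For the TODO sites, the cmpxchg idea would be roughly along these lines
(untested sketch, helper name made up; dtor/accounting on the free path
omitted for brevity):

	static bool try_install_pte_table(pmd_t *pmdp, pmd_t old_pmd,
					  phys_addr_t pte_phys,
					  pmdval_t tableprot)
	{
		pmd_t table = __pmd(__phys_to_pmd_val(pte_phys) | tableprot);

		if (cmpxchg_relaxed(&pmd_val(*pmdp), pmd_val(old_pmd),
				    pmd_val(table)) != pmd_val(old_pmd)) {
			/* Lost the race: free our table, use the winner's. */
			pagetable_free(virt_to_ptdesc(phys_to_virt(pte_phys)));
			return false;
		}
		return true;
	}

split_pmd() would call something like this instead of __pmd_populate()
(after the dsb(ishst) that makes the new ptes visible), and split_pud()
would do the equivalent at the pud level.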

---8<---
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8fcf59ba39db..22a09cc7a2aa 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -480,6 +480,9 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
 			     int flags);
 #endif

+/* Sentinel used to represent failure to allocate for phys_addr_t type. */
+#define INVALID_PHYS_ADDR -1
+
 static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
 				       enum pgtable_type pgtable_type)
 {
@@ -487,7 +490,9 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
 	phys_addr_t pa;

-	BUG_ON(!ptdesc);
+	if (!ptdesc)
+		return INVALID_PHYS_ADDR;
+
 	pa = page_to_phys(ptdesc_page(ptdesc));

 	switch (pgtable_type) {
@@ -509,15 +514,27 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
 }

 static phys_addr_t __maybe_unused
-pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
+try_pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
 {
 	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
 }

+static phys_addr_t __maybe_unused
+pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
+{
+	phys_addr_t pa = __pgd_pgtable_alloc(&init_mm, pgtable_type);
+
+	BUG_ON(pa == INVALID_PHYS_ADDR);
+	return pa;
+}
+
 static phys_addr_t
 pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
 {
-	return __pgd_pgtable_alloc(NULL, pgtable_type);
+	phys_addr_t pa = __pgd_pgtable_alloc(NULL, pgtable_type);
+
+	BUG_ON(pa == INVALID_PHYS_ADDR);
+	return pa;
 }

 /*
@@ -1616,3 +1633,202 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
 	return 0;
 }
 #endif
+
+static void split_contpte(pte_t *ptep)
+{
+	int i;
+
+	ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
+	for (i = 0; i < CONT_PTES; i++, ptep++)
+		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
+}
+
+static int split_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
+	unsigned long pfn = pmd_pfn(pmd);
+	pgprot_t prot = pmd_pgprot(pmd);
+	phys_addr_t pte_phys;
+	pte_t *ptep;
+	int i;
+
+	pte_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PTE);
+	if (pte_phys == INVALID_PHYS_ADDR)
+		return -ENOMEM;
+	ptep = (pte_t *)phys_to_virt(pte_phys);
+
+	if (pgprot_val(prot) & PMD_SECT_PXN)
+		tableprot |= PMD_TABLE_PXN;
+
+	prot = __pgprot((pgprot_val(prot) & ~PTE_TYPE_MASK) | PTE_TYPE_PAGE);
+	prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+	for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
+		__set_pte(ptep, pfn_pte(pfn, prot));
+
+	/*
+	 * Ensure the pte entries are visible to the table walker by the time
+	 * the pmd entry that points to the ptes is visible.
+	 */
+	dsb(ishst);
+
+	// TODO: THIS NEEDS TO BE CMPXCHG THEN FREE THE TABLE IF WE LOST.
+	__pmd_populate(pmdp, pte_phys, tableprot);
+
+	return 0;
+}
+
+static void split_contpmd(pmd_t *pmdp)
+{
+	int i;
+
+	pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS);
+	for (i = 0; i < CONT_PMDS; i++, pmdp++)
+		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
+}
+
+static int split_pud(pud_t *pudp, pud_t pud)
+{
+	pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
+	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
+	unsigned long pfn = pud_pfn(pud);
+	pgprot_t prot = pud_pgprot(pud);
+	phys_addr_t pmd_phys;
+	pmd_t *pmdp;
+	int i;
+
+	pmd_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PMD);
+	if (pmd_phys == INVALID_PHYS_ADDR)
+		return -ENOMEM;
+	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
+
+	if (pgprot_val(prot) & PMD_SECT_PXN)
+		tableprot |= PUD_TABLE_PXN;
+
+	prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
+	prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++, pfn += step)
+		set_pmd(pmdp, pfn_pmd(pfn, prot));
+
+	/*
+	 * Ensure the pmd entries are visible to the table walker by the time
+	 * the pud entry that points to the pmds is visible.
+	 */
+	dsb(ishst);
+
+	// TODO: THIS NEEDS TO BE CMPXCHG THEN FREE THE TABLE IF WE LOST.
+	__pud_populate(pudp, pmd_phys, tableprot);
+
+	return 0;
+}
+
+int split_leaf_mapping(unsigned long addr)
+{
+	pgd_t *pgdp, pgd;
+	p4d_t *p4dp, p4d;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
+	pte_t *ptep, pte;
+	int ret = 0;
+
+	/*
+	 * !BBML2_NOABORT systems should not be trying to change permissions on
+	 * anything that is not pte-mapped in the first place. Just return early
+	 * and let the permission change code raise a warning if not already
+	 * pte-mapped.
+	 */
+	if (!system_supports_bbml2_noabort())
+		return 0;
+
+	/*
+	 * Ensure addr is at least page-aligned since this is the finest
+	 * granularity we can split to.
+	 */
+	if (addr != PAGE_ALIGN(addr))
+		return -EINVAL;
+
+	arch_enter_lazy_mmu_mode();
+
+	/*
+	 * PGD: If addr is PGD aligned then addr already describes a leaf
+	 * boundary. If not present then there is nothing to split.
+	 */
+	if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr)
+		goto out;
+	pgdp = pgd_offset_k(addr);
+	pgd = pgdp_get(pgdp);
+	if (!pgd_present(pgd))
+		goto out;
+
+	/*
+	 * P4D: If addr is P4D aligned then addr already describes a leaf
+	 * boundary. If not present then there is nothing to split.
+	 */
+	if (ALIGN_DOWN(addr, P4D_SIZE) == addr)
+		goto out;
+	p4dp = p4d_offset(pgdp, addr);
+	p4d = p4dp_get(p4dp);
+	if (!p4d_present(p4d))
+		goto out;
+
+	/*
+	 * PUD: If addr is PUD aligned then addr already describes a leaf
+	 * boundary. If not present then there is nothing to split. Otherwise,
+	 * if we have a pud leaf, split to contpmd.
+	 */
+	if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
+		goto out;
+	pudp = pud_offset(p4dp, addr);
+	pud = pudp_get(pudp);
+	if (!pud_present(pud))
+		goto out;
+	if (pud_leaf(pud)) {
+		ret = split_pud(pudp, pud);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * CONTPMD: If addr is CONTPMD aligned then addr already describes a
+	 * leaf boundary. If not present then there is nothing to split.
+	 * Otherwise, if we have a contpmd leaf, split to pmd.
+	 */
+	if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
+		goto out;
+	pmdp = pmd_offset(pudp, addr);
+	pmd = pmdp_get(pmdp);
+	if (!pmd_present(pmd))
+		goto out;
+	if (pmd_leaf(pmd)) {
+		if (pmd_cont(pmd))
+			split_contpmd(pmdp);
+		/*
+		 * PMD: If addr is PMD aligned then addr already describes a
+		 * leaf boundary. Otherwise, split to contpte.
+		 */
+		if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
+			goto out;
+		ret = split_pmd(pmdp, pmd);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * CONTPTE: If addr is CONTPTE aligned then addr already describes a
+	 * leaf boundary. If not present then there is nothing to split.
+	 * Otherwise, if we have a contpte leaf, split to pte.
+	 */
+	if (ALIGN_DOWN(addr, CONT_PTE_SIZE) == addr)
+		goto out;
+	ptep = pte_offset_kernel(pmdp, addr);
+	pte = __ptep_get(ptep);
+	if (!pte_present(pte))
+		goto out;
+	if (pte_cont(pte))
+		split_contpte(ptep);
+
+out:
+	arch_leave_lazy_mmu_mode();
+	return ret;
+}
---8<---

Thanks,
Ryan
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 6 months ago

On 6/16/25 9:24 AM, Ryan Roberts wrote:
> On 31/05/2025 03:41, Yang Shi wrote:
>> When rodata=full is specified, kernel linear mapping has to be mapped at
>> PTE level since large page table can't be split due to break-before-make
>> rule on ARM64.
>>
>> This resulted in a couple of problems:
>>    - performance degradation
>>    - more TLB pressure
>>    - memory waste for kernel page table
>>
>> With FEAT_BBM level 2 support, splitting large block page table to
>> smaller ones doesn't need to make the page table entry invalid anymore.
>> This allows kernel split large block mapping on the fly.
>>
>> Add kernel page table split support and use large block mapping by
>> default when FEAT_BBM level 2 is supported for rodata=full.  When
>> changing permissions for kernel linear mapping, the page table will be
>> split to smaller size.
>>
>> The machine without FEAT_BBM level 2 will fallback to have kernel linear
>> mapping PTE-mapped when rodata=full.
>>
>> With this we saw significant performance boost with some benchmarks and
>> much less memory consumption on my AmpereOne machine (192 cores, 1P) with
>> 256GB memory.
>>
>> * Memory use after boot
>> Before:
>> MemTotal:       258988984 kB
>> MemFree:        254821700 kB
>>
>> After:
>> MemTotal:       259505132 kB
>> MemFree:        255410264 kB
>>
>> Around 500MB more memory are free to use.  The larger the machine, the
>> more memory saved.
>>
>> * Memcached
>> We saw performance degradation when running Memcached benchmark with
>> rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
>> With this patchset we saw ops/sec is increased by around 3.5%, P99
>> latency is reduced by around 9.6%.
>> The gain mainly came from reduced kernel TLB misses.  The kernel TLB
>> MPKI is reduced by 28.5%.
>>
>> The benchmark data is now on par with rodata=on too.
>>
>> * Disk encryption (dm-crypt) benchmark
>> Ran fio benchmark with the below command on a 128G ramdisk (ext4) with disk
>> encryption (by dm-crypt).
>> fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
>>      --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
>>      --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
>>      --name=iops-test-job --eta-newline=1 --size 100G
>>
>> The IOPS is increased by 90% - 150% (the variance is high, but the worst
>> number of good case is around 90% more than the best number of bad case).
>> The bandwidth is increased and the avg clat is reduced proportionally.
>>
>> * Sequential file read
>> Read 100G file sequentially on XFS (xfs_io read with page cache populated).
>> The bandwidth is increased by 150%.
>>
>> Signed-off-by: Yang Shi<yang@os.amperecomputing.com>
>> ---
>>   arch/arm64/include/asm/cpufeature.h |  26 +++
>>   arch/arm64/include/asm/mmu.h        |   1 +
>>   arch/arm64/include/asm/pgtable.h    |  12 +-
>>   arch/arm64/kernel/cpufeature.c      |   2 +-
>>   arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
>>   arch/arm64/mm/pageattr.c            |  37 +++-
>>   6 files changed, 319 insertions(+), 28 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
>> index 8f36ffa16b73..a95806980298 100644
>> --- a/arch/arm64/include/asm/cpufeature.h
>> +++ b/arch/arm64/include/asm/cpufeature.h
>> @@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
>>   #endif
>>   }
>>   
> [...] (I gave comments on this part in previous reply)
>
> I'm focussing on teh table walker in mmu.c here - i.e. implementation of
> split_linear_mapping()...
>
>> index 775c0536b194..4c5d3aa35d62 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -45,6 +45,7 @@
>>   #define NO_BLOCK_MAPPINGS	BIT(0)
>>   #define NO_CONT_MAPPINGS	BIT(1)
>>   #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
>> +#define SPLIT_MAPPINGS		BIT(3)
>>   
>>   u64 kimage_voffset __ro_after_init;
>>   EXPORT_SYMBOL(kimage_voffset);
>> @@ -166,12 +167,91 @@ static void init_clear_pgtable(void *table)
>>   	dsb(ishst);
>>   }
>>   
>> +static void split_cont_pte(pte_t *ptep)
>> +{
>> +	pte_t *_ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
>> +	pte_t _pte;
>> +
>> +	for (int i = 0; i < CONT_PTES; i++, _ptep++) {
>> +		_pte = READ_ONCE(*_ptep);
>> +		_pte = pte_mknoncont(_pte);
>> +		__set_pte_nosync(_ptep, _pte);
> This is not atomic but I don't think that matters for kernel mappings since we
> don't care about HW-modified access/dirty bits.

Yes, the access bit should always be set for kernel mappings if I remember
correctly. Kernel mappings don't care about the dirty bit.

>> +	}
>> +
>> +	dsb(ishst);
>> +	isb();
> I think we can use lazy_mmu_mode here to potentially batch the barriers for
> multiple levels. This also avoids the need for adding __set_pmd_nosync().
>
>> +}
>> +
>> +static void split_cont_pmd(pmd_t *pmdp)
>> +{
>> +	pmd_t *_pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS);
>> +	pmd_t _pmd;
>> +
>> +	for (int i = 0; i < CONT_PMDS; i++, _pmdp++) {
>> +		_pmd = READ_ONCE(*_pmdp);
>> +		_pmd = pmd_mknoncont(_pmd);
>> +		set_pmd(_pmdp, _pmd);
> Without lazy_mmu_mode this is issuing barriers per entry. With lazy_mmu_mode
> this will defer the barriers until we exit the mode so this will get a bit
> faster. (in practice it will be a bit like what you have done for contpte but
> potentially even better because we can batch across levels.

OK, lazy mmu should work IIUC. A concurrent page table walker should
see either the old entry or the new entry. Both entries point to the same
physical address and have the same permissions, just a different page size.
BBML2_NOABORT can handle this gracefully.

>> +	}
>> +}
>> +
>> +static void split_pmd(pmd_t pmd, phys_addr_t pte_phys, int flags)
>> +{
>> +	pte_t *ptep;
>> +	unsigned long pfn;
>> +	pgprot_t prot;
>> +
>> +	pfn = pmd_pfn(pmd);
>> +	prot = pmd_pgprot(pmd);
>> +	prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PTE_TYPE_PAGE);
>> +
>> +	ptep = (pte_t *)phys_to_virt(pte_phys);
>> +
>> +	/* It must be naturally aligned if PMD is leaf */
>> +	if ((flags & NO_CONT_MAPPINGS) == 0)
> I'm not sure we have a use case for avoiding CONT mappings? Suggest doing it
> unconditionally.

Repainting the linear mapping does avoid CONT mappings.

>> +		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>> +
>> +	for (int i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
>> +		__set_pte_nosync(ptep, pfn_pte(pfn, prot));
>> +
>> +	dsb(ishst);
>> +}
>> +
>> +static void split_pud(pud_t pud, phys_addr_t pmd_phys, int flags)
>> +{
>> +	pmd_t *pmdp;
>> +	unsigned long pfn;
>> +	pgprot_t prot;
>> +	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
>> +
>> +	pfn = pud_pfn(pud);
>> +	prot = pud_pgprot(pud);
>> +	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
>> +
>> +	/* It must be naturally aligned if PUD is leaf */
>> +	if ((flags & NO_CONT_MAPPINGS) == 0)
>> +		prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>> +
>> +	for (int i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
>> +		__set_pmd_nosync(pmdp, pfn_pmd(pfn, prot));
>> +		pfn += step;
>> +	}
>> +
>> +	dsb(ishst);
>> +}
>> +
>>   static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
>> -		     phys_addr_t phys, pgprot_t prot)
>> +		     phys_addr_t phys, pgprot_t prot, int flags)
>>   {
>>   	do {
>>   		pte_t old_pte = __ptep_get(ptep);
>>   
>> +		if (flags & SPLIT_MAPPINGS) {
>> +			if (pte_cont(old_pte))
>> +				split_cont_pte(ptep);
>> +
>> +			continue;
>> +		}
>> +
>>   		/*
>>   		 * Required barriers to make this visible to the table walker
>>   		 * are deferred to the end of alloc_init_cont_pte().
>> @@ -199,11 +279,20 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>   	pmd_t pmd = READ_ONCE(*pmdp);
>>   	pte_t *ptep;
>>   	int ret = 0;
>> +	bool split = flags & SPLIT_MAPPINGS;
>> +	pmdval_t pmdval;
>> +	phys_addr_t pte_phys;
>>   
>> -	BUG_ON(pmd_sect(pmd));
>> -	if (pmd_none(pmd)) {
>> -		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
>> -		phys_addr_t pte_phys;
>> +	if (!split)
>> +		BUG_ON(pmd_sect(pmd));
>> +
>> +	if (pmd_none(pmd) && split) {
>> +		ret = -EINVAL;
>> +		goto out;
>> +	}
>> +
>> +	if (pmd_none(pmd) || (split && pmd_leaf(pmd))) {
>> +		pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
>>   
>>   		if (flags & NO_EXEC_MAPPINGS)
>>   			pmdval |= PMD_TABLE_PXN;
>> @@ -213,6 +302,18 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>   			ret = -ENOMEM;
>>   			goto out;
>>   		}
>> +	}
>> +
>> +	if (split) {
>> +		if (pmd_leaf(pmd)) {
>> +			split_pmd(pmd, pte_phys, flags);
>> +			__pmd_populate(pmdp, pte_phys, pmdval);
>> +		}
>> +		ptep = pte_offset_kernel(pmdp, addr);
>> +		goto split_pgtable;
>> +	}
>> +
>> +	if (pmd_none(pmd)) {
>>   		ptep = pte_set_fixmap(pte_phys);
>>   		init_clear_pgtable(ptep);
>>   		ptep += pte_index(addr);
>> @@ -222,17 +323,28 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>   		ptep = pte_set_fixmap_offset(pmdp, addr);
>>   	}
>>   
>> +split_pgtable:
>>   	do {
>>   		pgprot_t __prot = prot;
>>   
>>   		next = pte_cont_addr_end(addr, end);
>>   
>> +		if (split) {
>> +			pte_t pteval = READ_ONCE(*ptep);
>> +			bool cont = pte_cont(pteval);
>> +
>> +			if (cont &&
>> +			    ((addr | next) & ~CONT_PTE_MASK) == 0 &&
>> +			    (flags & NO_CONT_MAPPINGS) == 0)
>> +				continue;
>> +		}
>> +
>>   		/* use a contiguous mapping if the range is suitably aligned */
>>   		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
>>   		    (flags & NO_CONT_MAPPINGS) == 0)
>>   			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>>   
>> -		init_pte(ptep, addr, next, phys, __prot);
>> +		init_pte(ptep, addr, next, phys, __prot, flags);
>>   
>>   		ptep += pte_index(next) - pte_index(addr);
>>   		phys += next - addr;
>> @@ -243,7 +355,8 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>   	 * ensure that all previous pgtable writes are visible to the table
>>   	 * walker.
>>   	 */
>> -	pte_clear_fixmap();
>> +	if (!split)
>> +		pte_clear_fixmap();
>>   
>>   out:
>>   	return ret;
>> @@ -255,15 +368,29 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>>   {
>>   	unsigned long next;
>>   	int ret = 0;
>> +	bool split = flags & SPLIT_MAPPINGS;
>> +	bool cont;
>>   
>>   	do {
>>   		pmd_t old_pmd = READ_ONCE(*pmdp);
>>   
>>   		next = pmd_addr_end(addr, end);
>>   
>> +		if (split && pmd_leaf(old_pmd)) {
>> +			cont = pgprot_val(pmd_pgprot(old_pmd)) & PTE_CONT;
>> +			if (cont)
>> +				split_cont_pmd(pmdp);
>> +
>> +			/* The PMD is fully contained in the range */
>> +			if (((addr | next) & ~PMD_MASK) == 0 &&
>> +			    (flags & NO_BLOCK_MAPPINGS) == 0)
>> +				continue;
>> +		}
>> +
>>   		/* try section mapping first */
>>   		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
>> -		    (flags & NO_BLOCK_MAPPINGS) == 0) {
>> +		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
>> +		    (flags & SPLIT_MAPPINGS) == 0) {
>>   			pmd_set_huge(pmdp, phys, prot);
>>   
>>   			/*
>> @@ -278,7 +405,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>>   			if (ret)
>>   				break;
>>   
>> -			BUG_ON(pmd_val(old_pmd) != 0 &&
>> +			BUG_ON(!split && pmd_val(old_pmd) != 0 &&
>>   			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
>>   		}
>>   		phys += next - addr;
>> @@ -296,14 +423,23 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>>   	int ret = 0;
>>   	pud_t pud = READ_ONCE(*pudp);
>>   	pmd_t *pmdp;
>> +	bool split = flags & SPLIT_MAPPINGS;
>> +	pudval_t pudval;
>> +	phys_addr_t pmd_phys;
>>   
>>   	/*
>>   	 * Check for initial section mappings in the pgd/pud.
>>   	 */
>> -	BUG_ON(pud_sect(pud));
>> -	if (pud_none(pud)) {
>> -		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
>> -		phys_addr_t pmd_phys;
>> +	if (!split)
>> +		BUG_ON(pud_sect(pud));
>> +
>> +	if (pud_none(pud) && split) {
>> +		ret = -EINVAL;
>> +		goto out;
>> +	}
>> +
>> +	if (pud_none(pud) || (split && pud_leaf(pud))) {
>> +		pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
>>   
>>   		if (flags & NO_EXEC_MAPPINGS)
>>   			pudval |= PUD_TABLE_PXN;
>> @@ -313,6 +449,18 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>>   			ret = -ENOMEM;
>>   			goto out;
>>   		}
>> +	}
>> +
>> +	if (split) {
>> +		if (pud_leaf(pud)) {
>> +			split_pud(pud, pmd_phys, flags);
>> +			__pud_populate(pudp, pmd_phys, pudval);
>> +		}
>> +		pmdp = pmd_offset(pudp, addr);
>> +		goto split_pgtable;
>> +	}
>> +
>> +	if (pud_none(pud)) {
>>   		pmdp = pmd_set_fixmap(pmd_phys);
>>   		init_clear_pgtable(pmdp);
>>   		pmdp += pmd_index(addr);
>> @@ -322,11 +470,22 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>>   		pmdp = pmd_set_fixmap_offset(pudp, addr);
>>   	}
>>   
>> +split_pgtable:
>>   	do {
>>   		pgprot_t __prot = prot;
>>   
>>   		next = pmd_cont_addr_end(addr, end);
>>   
>> +		if (split) {
>> +			pmd_t pmdval = READ_ONCE(*pmdp);
>> +			bool cont = pgprot_val(pmd_pgprot(pmdval)) & PTE_CONT;
>> +
>> +			if (cont &&
>> +			    ((addr | next) & ~CONT_PMD_MASK) == 0 &&
>> +			    (flags & NO_CONT_MAPPINGS) == 0)
>> +				continue;
>> +		}
>> +
>>   		/* use a contiguous mapping if the range is suitably aligned */
>>   		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
>>   		    (flags & NO_CONT_MAPPINGS) == 0)
>> @@ -340,7 +499,8 @@ static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
>>   		phys += next - addr;
>>   	} while (addr = next, addr != end);
>>   
>> -	pmd_clear_fixmap();
>> +	if (!split)
>> +		pmd_clear_fixmap();
>>   
>>   out:
>>   	return ret;
>> @@ -355,6 +515,16 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>>   	int ret = 0;
>>   	p4d_t p4d = READ_ONCE(*p4dp);
>>   	pud_t *pudp;
>> +	bool split = flags & SPLIT_MAPPINGS;
>> +
>> +	if (split) {
>> +		if (p4d_none(p4d)) {
>> +			ret = -EINVAL;
>> +			goto out;
>> +		}
>> +		pudp = pud_offset(p4dp, addr);
>> +		goto split_pgtable;
>> +	}
>>   
>>   	if (p4d_none(p4d)) {
>>   		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF;
>> @@ -377,17 +547,26 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>>   		pudp = pud_set_fixmap_offset(p4dp, addr);
>>   	}
>>   
>> +split_pgtable:
>>   	do {
>>   		pud_t old_pud = READ_ONCE(*pudp);
>>   
>>   		next = pud_addr_end(addr, end);
>>   
>> +		if (split && pud_leaf(old_pud)) {
>> +			/* The PUD is fully contained in the range */
>> +			if (((addr | next) & ~PUD_MASK) == 0 &&
>> +			    (flags & NO_BLOCK_MAPPINGS) == 0)
>> +				continue;
>> +		}
>> +
>>   		/*
>>   		 * For 4K granule only, attempt to put down a 1GB block
>>   		 */
>>   		if (pud_sect_supported() &&
>>   		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
>> -		    (flags & NO_BLOCK_MAPPINGS) == 0) {
>> +		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
>> +		    (flags & SPLIT_MAPPINGS) == 0) {
>>   			pud_set_huge(pudp, phys, prot);
>>   
>>   			/*
>> @@ -402,13 +581,14 @@ static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
>>   			if (ret)
>>   				break;
>>   
>> -			BUG_ON(pud_val(old_pud) != 0 &&
>> +			BUG_ON(!split && pud_val(old_pud) != 0 &&
>>   			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
>>   		}
>>   		phys += next - addr;
>>   	} while (pudp++, addr = next, addr != end);
>>   
>> -	pud_clear_fixmap();
>> +	if (!split)
>> +		pud_clear_fixmap();
>>   
>>   out:
>>   	return ret;
>> @@ -423,6 +603,16 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>>   	int ret = 0;
>>   	pgd_t pgd = READ_ONCE(*pgdp);
>>   	p4d_t *p4dp;
>> +	bool split = flags & SPLIT_MAPPINGS;
>> +
>> +	if (split) {
>> +		if (pgd_none(pgd)) {
>> +			ret = -EINVAL;
>> +			goto out;
>> +		}
>> +		p4dp = p4d_offset(pgdp, addr);
>> +		goto split_pgtable;
>> +	}
> I really don't like the way the split logic has been added to the existing table
> walker; there are so many conditionals, it's not clear that there is really any
> advantage. I know I proposed it originally, but I changed my mind last cycle and
made the case for keeping it separate. That's still my opinion I'm afraid; I'm
> proposing a patch below showing how I would prefer to see this implemented.

Yes, it adds a lot of conditionals because split does the reverse 
operation; we need the conditionals to tell whether we are creating or 
splitting. But I don't recall you mentioning this in the v3 discussion. 
Maybe I misunderstood you or missed the point, because our discussion 
was focused on keeping block mappings instead of splitting all the way 
down to PTE all the time.

But anyway, this basically rolls back to my v2 implementation, which 
had dedicated separate functions for split.

>>   
>>   	if (pgd_none(pgd)) {
>>   		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF;
>> @@ -445,6 +635,7 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>>   		p4dp = p4d_set_fixmap_offset(pgdp, addr);
>>   	}
>>   
>> +split_pgtable:
>>   	do {
>>   		p4d_t old_p4d = READ_ONCE(*p4dp);
>>   
>> @@ -461,7 +652,8 @@ static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
>>   		phys += next - addr;
>>   	} while (p4dp++, addr = next, addr != end);
>>   
>> -	p4d_clear_fixmap();
>> +	if (!split)
>> +		p4d_clear_fixmap();
>>   
>>   out:
>>   	return ret;
>> @@ -557,6 +749,25 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
>>   	return pa;
>>   }
>>   
>> +int split_linear_mapping(unsigned long start, unsigned long end)
>> +{
>> +	int ret = 0;
>> +
>> +	if (!system_supports_bbml2_noabort())
>> +		return 0;
>> +
>> +	mmap_write_lock(&init_mm);
>> +	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
>> +	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
>> +					  start, (end - start), __pgprot(0),
>> +					  __pgd_pgtable_alloc,
>> +					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
> Implementing this on top of __create_pgd_mapping_locked() is problematic because
> (I think) it assumes that the virtual range is physically contiguous? That's
> fine for the linear map, but I'd like to reuse this primitive for vmalloc too.

That assumption only matters when creating page tables. Split doesn't 
care whether the range is physically contiguous or not; the phys 
argument is not actually used by the split primitive.

>> +	mmap_write_unlock(&init_mm);
> As already mentioned, I don't like this locking. I think we can make it work
> locklessly as long as we are only ever splitting and not collapsing.

I don't disagree that lockless is good; I'm just not sure whether it is 
worth the extra complexity.

>> +	flush_tlb_kernel_range(start, end);
>> +
>> +	return ret;
>> +}
> I had a go at creating a version to try to illustrate how I have been thinking
> about this. What do you think? I've only compile tested it (and it fails because
> I don't have pmd_mknoncont() and system_supports_bbml2_noabort() in my tree -
> but the rest looks ok). It's on top of v6.16-rc1, where the pgtable allocation
> functions have changed a bit. And I don't think you need patch 2 from your
> series with this change. I haven't implemented the cmpxchg part that I think

Yes, patch #2 is not needed anymore for this series if we have the 
split primitive as a separate function. But it should still be a 
prerequisite for fixing the memory hotplug bug.

> would make it safe to be used locklessly yet, but I've marked the sites up with
> TODO. Once implemented, the idea is that concurrent threads trying to split on
> addresses that all lie within the same block/contig mapping should be safe.
>
> ---8<---
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 8fcf59ba39db..22a09cc7a2aa 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -480,6 +480,9 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys,
> unsigned long virt,
>   			     int flags);
>   #endif
>
> +/* Sentinel used to represent failure to allocate for phys_addr_t type. */
> +#define INVALID_PHYS_ADDR -1
> +
>   static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
>   				       enum pgtable_type pgtable_type)
>   {
> @@ -487,7 +490,9 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
>   	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0);
>   	phys_addr_t pa;
>
> -	BUG_ON(!ptdesc);
> +	if (!ptdesc)
> +		return INVALID_PHYS_ADDR;
> +
>   	pa = page_to_phys(ptdesc_page(ptdesc));
>
>   	switch (pgtable_type) {
> @@ -509,15 +514,27 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm,
>   }
>
>   static phys_addr_t __maybe_unused
> -pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
> +try_pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
>   {
>   	return __pgd_pgtable_alloc(&init_mm, pgtable_type);
>   }
>
> +static phys_addr_t __maybe_unused
> +pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
> +{
> +	phys_addr_t pa = __pgd_pgtable_alloc(&init_mm, pgtable_type);
> +
> +	BUG_ON(pa == INVALID_PHYS_ADDR);
> +	return pa;
> +}
> +
>   static phys_addr_t
>   pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type)
>   {
> -	return __pgd_pgtable_alloc(NULL, pgtable_type);
> +	phys_addr_t pa = __pgd_pgtable_alloc(NULL, pgtable_type);
> +
> +	BUG_ON(pa == INVALID_PHYS_ADDR);
> +	return pa;
>   }
>
>   /*
> @@ -1616,3 +1633,202 @@ int arch_set_user_pkey_access(struct task_struct *tsk,
> int pkey, unsigned long i
>   	return 0;
>   }
>   #endif
> +
> +static void split_contpte(pte_t *ptep)
> +{
> +	int i;
> +
> +	ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
> +	for (i = 0; i < CONT_PTES; i++, ptep++)
> +		__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
> +}
> +
> +static int split_pmd(pmd_t *pmdp, pmd_t pmd)
> +{
> +	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
> +	unsigned long pfn = pmd_pfn(pmd);
> +	pgprot_t prot = pmd_pgprot(pmd);
> +	phys_addr_t pte_phys;
> +	pte_t *ptep;
> +	int i;
> +
> +	pte_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PTE);
> +	if (pte_phys == INVALID_PHYS_ADDR)
> +		return -ENOMEM;
> +	ptep = (pte_t *)phys_to_virt(pte_phys);
> +
> +	if (pgprot_val(prot) & PMD_SECT_PXN)
> +		tableprot |= PMD_TABLE_PXN;
> +
> +	prot = __pgprot((pgprot_val(prot) & ~PTE_TYPE_MASK) | PTE_TYPE_PAGE);
> +	prot = __pgprot(pgprot_val(prot) | PTE_CONT);
> +
> +	for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
> +		__set_pte(ptep, pfn_pte(pfn, prot));
> +
> +	/*
> +	 * Ensure the pte entries are visible to the table walker by the time
> +	 * the pmd entry that points to the ptes is visible.
> +	 */
> +	dsb(ishst);
> +
> +	// TODO: THIS NEEDS TO BE CMPXCHG THEN FREE THE TABLE IF WE LOST.
> +	__pmd_populate(pmdp, pte_phys, tableprot);
> +
> +	return 0;
> +}
> +
> +static void split_contpmd(pmd_t *pmdp)
> +{
> +	int i;
> +
> +	pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS);
> +	for (i = 0; i < CONT_PMDS; i++, pmdp++)
> +		set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
> +}
> +
> +static int split_pud(pud_t *pudp, pud_t pud)
> +{
> +	pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
> +	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
> +	unsigned long pfn = pud_pfn(pud);
> +	pgprot_t prot = pud_pgprot(pud);
> +	phys_addr_t pmd_phys;
> +	pmd_t *pmdp;
> +	int i;
> +
> +	pmd_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PMD);
> +	if (pmd_phys == INVALID_PHYS_ADDR)
> +		return -ENOMEM;
> +	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
> +
> +	if (pgprot_val(prot) & PMD_SECT_PXN)
> +		tableprot |= PUD_TABLE_PXN;
> +
> +	prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
> +	prot = __pgprot(pgprot_val(prot) | PTE_CONT);
> +
> +	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++, pfn += step)
> +		set_pmd(pmdp, pfn_pmd(pfn, prot));
> +
> +	/*
> +	 * Ensure the pmd entries are visible to the table walker by the time
> +	 * the pud entry that points to the pmds is visible.
> +	 */
> +	dsb(ishst);
> +
> +	// TODO: THIS NEEDS TO BE CMPXCHG THEN FREE THE TABLE IF WE LOST.
> +	__pud_populate(pudp, pmd_phys, tableprot);
> +
> +	return 0;
> +}
> +
> +int split_leaf_mapping(unsigned long addr)

Thanks for coming up with the code. It does help to understand your 
idea. Now I see why you suggested the "split_mapping(start); 
split_mapping(end);" model. It does make the implementation easier 
because we don't need a loop anymore. But it may have a couple of 
problems:
   1. We need to walk the page table twice instead of once. That sounds 
expensive.
   2. How should we handle repainting? For repainting we need to split 
all the page tables between start and end all the way down to PTEs, 
rather than keeping block mappings. This model doesn't work for that, 
right? For example, repainting a 2G block where the first 1G is mapped 
by a PUD and the second 1G by 511 PMDs plus 512 PTEs: 
split_mapping(start) will split the first 1G, but split_mapping(end) 
will do nothing, so the 511 PMDs are kept intact. In addition, I think 
we would also prefer to reuse the split primitive for repainting 
instead of inventing another one.

Thanks,
Yang

> +{
> +	pgd_t *pgdp, pgd;
> +	p4d_t *p4dp, p4d;
> +	pud_t *pudp, pud;
> +	pmd_t *pmdp, pmd;
> +	pte_t *ptep, pte;
> +	int ret = 0;
> +
> +	/*
> +	 * !BBML2_NOABORT systems should not be trying to change permissions on
> +	 * anything that is not pte-mapped in the first place. Just return early
> +	 * and let the permission change code raise a warning if not already
> +	 * pte-mapped.
> +	 */
> +	if (!system_supports_bbml2_noabort())
> +		return 0;
> +
> +	/*
> +	 * Ensure addr is at least page-aligned since this is the finest
> +	 * granularity we can split to.
> +	 */
> +	if (addr != PAGE_ALIGN(addr))
> +		return -EINVAL;
> +
> +	arch_enter_lazy_mmu_mode();
> +
> +	/*
> +	 * PGD: If addr is PGD aligned then addr already describes a leaf
> +	 * boundary. If not present then there is nothing to split.
> +	 */
> +	if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr)
> +		goto out;
> +	pgdp = pgd_offset_k(addr);
> +	pgd = pgdp_get(pgdp);
> +	if (!pgd_present(pgd))
> +		goto out;
> +
> +	/*
> +	 * P4D: If addr is P4D aligned then addr already describes a leaf
> +	 * boundary. If not present then there is nothing to split.
> +	 */
> +	if (ALIGN_DOWN(addr, P4D_SIZE) == addr)
> +		goto out;
> +	p4dp = p4d_offset(pgdp, addr);
> +	p4d = p4dp_get(p4dp);
> +	if (!p4d_present(p4d))
> +		goto out;
> +
> +	/*
> +	 * PUD: If addr is PUD aligned then addr already describes a leaf
> +	 * boundary. If not present then there is nothing to split. Otherwise,
> +	 * if we have a pud leaf, split to contpmd.
> +	 */
> +	if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
> +		goto out;
> +	pudp = pud_offset(p4dp, addr);
> +	pud = pudp_get(pudp);
> +	if (!pud_present(pud))
> +		goto out;
> +	if (pud_leaf(pud)) {
> +		ret = split_pud(pudp, pud);
> +		if (ret)
> +			goto out;
> +	}
> +
> +	/*
> +	 * CONTPMD: If addr is CONTPMD aligned then addr already describes a
> +	 * leaf boundary. If not present then there is nothing to split.
> +	 * Otherwise, if we have a contpmd leaf, split to pmd.
> +	 */
> +	if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
> +		goto out;
> +	pmdp = pmd_offset(pudp, addr);
> +	pmd = pmdp_get(pmdp);
> +	if (!pmd_present(pmd))
> +		goto out;
> +	if (pmd_leaf(pmd)) {
> +		if (pmd_cont(pmd))
> +			split_contpmd(pmdp);
> +		/*
> +		 * PMD: If addr is PMD aligned then addr already describes a
> +		 * leaf boundary. Otherwise, split to contpte.
> +		 */
> +		if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
> +			goto out;
> +		ret = split_pmd(pmdp, pmd);
> +		if (ret)
> +			goto out;
> +	}
> +
> +	/*
> +	 * CONTPTE: If addr is CONTPTE aligned then addr already describes a
> +	 * leaf boundary. If not present then there is nothing to split.
> +	 * Otherwise, if we have a contpte leaf, split to pte.
> +	 */
> +	if (ALIGN_DOWN(addr, CONT_PTE_SIZE) == addr)
> +		goto out;
> +	ptep = pte_offset_kernel(pmdp, addr);
> +	pte = __ptep_get(ptep);
> +	if (!pte_present(pte))
> +		goto out;
> +	if (pte_cont(pte))
> +		split_contpte(ptep);
> +
> +out:
> +	arch_leave_lazy_mmu_mode();
> +	return ret;
> +}
> ---8<---
>
> Thanks,
> Ryan
>

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Ryan Roberts 5 months, 3 weeks ago
[...]

>> +
>> +int split_leaf_mapping(unsigned long addr)
> 
> Thanks for coming up with the code. It does help to understand your idea. Now I
> see why you suggested "split_mapping(start); split_mapping(end);" model. It does
> make the implementation easier because we don't need a loop anymore. But this
> may have a couple of problems:
>   1. We need walk the page table twice instead of once. It sounds expensive.

Yes we need to walk twice. That may be more expensive or less expensive,
depending on the size of the range that you are splitting. If the range is large
then your approach loops through every leaf mapping between the start and end
which will be more expensive than just doing 2 walks. If the range is small then
your approach can avoid the second walk, but at the expense of all the extra
loop overhead.

My suggestion requires 5 loads (assuming the maximum of 5 levels of lookup).
Personally I think this is probably acceptable? Perhaps we need some other
voices here.


>   2. How should we handle repainting? We need split all the page tables all the
> way down to PTE for repainting between start and end rather than keeping block
> mappings. This model doesn't work, right? For example, repaint a 2G block. The
> first 1G is mapped by a PUD, the second 1G is mapped by 511 PMD and 512 PTEs.
> split_mapping(start) will split the first 1G, but split_mapping(end) will do
> nothing, the 511 PMDs are kept intact. In addition, I think we also prefer reuse
> the split primitive for repainting instead of inventing another one.

I agree my approach doesn't work for the repainting case. But what I'm
trying to say is that the 2 things are different operations;
split_leaf_mapping() is just trying to ensure that the start and end of a region
are on leaf boundaries. Repainting is trying to ensure that all leaf mappings
within a range are PTE-size. I've implemented the former and you've implemented
the latter. Your implementation looks like it meets the former's requirements
because you are only testing it for the case where the range is 1 page. But
actually it is splitting everything in the range to PTEs.

Thanks,
Ryan

> 
> Thanks,
> Yang
> 
>> +{
>> +    pgd_t *pgdp, pgd;
>> +    p4d_t *p4dp, p4d;
>> +    pud_t *pudp, pud;
>> +    pmd_t *pmdp, pmd;
>> +    pte_t *ptep, pte;
>> +    int ret = 0;
>> +
>> +    /*
>> +     * !BBML2_NOABORT systems should not be trying to change permissions on
>> +     * anything that is not pte-mapped in the first place. Just return early
>> +     * and let the permission change code raise a warning if not already
>> +     * pte-mapped.
>> +     */
>> +    if (!system_supports_bbml2_noabort())
>> +        return 0;
>> +
>> +    /*
>> +     * Ensure addr is at least page-aligned since this is the finest
>> +     * granularity we can split to.
>> +     */
>> +    if (addr != PAGE_ALIGN(addr))
>> +        return -EINVAL;
>> +
>> +    arch_enter_lazy_mmu_mode();
>> +
>> +    /*
>> +     * PGD: If addr is PGD aligned then addr already describes a leaf
>> +     * boundary. If not present then there is nothing to split.
>> +     */
>> +    if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr)
>> +        goto out;
>> +    pgdp = pgd_offset_k(addr);
>> +    pgd = pgdp_get(pgdp);
>> +    if (!pgd_present(pgd))
>> +        goto out;
>> +
>> +    /*
>> +     * P4D: If addr is P4D aligned then addr already describes a leaf
>> +     * boundary. If not present then there is nothing to split.
>> +     */
>> +    if (ALIGN_DOWN(addr, P4D_SIZE) == addr)
>> +        goto out;
>> +    p4dp = p4d_offset(pgdp, addr);
>> +    p4d = p4dp_get(p4dp);
>> +    if (!p4d_present(p4d))
>> +        goto out;
>> +
>> +    /*
>> +     * PUD: If addr is PUD aligned then addr already describes a leaf
>> +     * boundary. If not present then there is nothing to split. Otherwise,
>> +     * if we have a pud leaf, split to contpmd.
>> +     */
>> +    if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
>> +        goto out;
>> +    pudp = pud_offset(p4dp, addr);
>> +    pud = pudp_get(pudp);
>> +    if (!pud_present(pud))
>> +        goto out;
>> +    if (pud_leaf(pud)) {
>> +        ret = split_pud(pudp, pud);
>> +        if (ret)
>> +            goto out;
>> +    }
>> +
>> +    /*
>> +     * CONTPMD: If addr is CONTPMD aligned then addr already describes a
>> +     * leaf boundary. If not present then there is nothing to split.
>> +     * Otherwise, if we have a contpmd leaf, split to pmd.
>> +     */
>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>> +        goto out;
>> +    pmdp = pmd_offset(pudp, addr);
>> +    pmd = pmdp_get(pmdp);
>> +    if (!pmd_present(pmd))
>> +        goto out;
>> +    if (pmd_leaf(pmd)) {
>> +        if (pmd_cont(pmd))
>> +            split_contpmd(pmdp);
>> +        /*
>> +         * PMD: If addr is PMD aligned then addr already describes a
>> +         * leaf boundary. Otherwise, split to contpte.
>> +         */
>> +        if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
>> +            goto out;
>> +        ret = split_pmd(pmdp, pmd);
>> +        if (ret)
>> +            goto out;
>> +    }
>> +
>> +    /*
>> +     * CONTPTE: If addr is CONTPTE aligned then addr already describes a
>> +     * leaf boundary. If not present then there is nothing to split.
>> +     * Otherwise, if we have a contpte leaf, split to pte.
>> +     */
>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>> +        goto out;
>> +    ptep = pte_offset_kernel(pmdp, addr);
>> +    pte = __ptep_get(ptep);
>> +    if (!pte_present(pte))
>> +        goto out;
>> +    if (pte_cont(pte))
>> +        split_contpte(ptep);
>> +
>> +out:
>> +    arch_leave_lazy_mmu_mode();
>> +    return ret;
>> +}
>> ---8<---
>>
>> Thanks,
>> Ryan
>>
> 

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Dev Jain 4 months, 3 weeks ago
On 23/06/25 6:56 pm, Ryan Roberts wrote:
> [...]
>
>>> +
>>> +int split_leaf_mapping(unsigned long addr)
>> Thanks for coming up with the code. It does help to understand your idea. Now I
>> see why you suggested "split_mapping(start); split_mapping(end);" model. It does
>> make the implementation easier because we don't need a loop anymore. But this
>> may have a couple of problems:
>>    1. We need walk the page table twice instead of once. It sounds expensive.
> Yes we need to walk twice. That may be more expensive or less expensive,
> depending on the size of the range that you are splitting. If the range is large
> then your approach loops through every leaf mapping between the start and end
> which will be more expensive than just doing 2 walks. If the range is small then
> your approach can avoid the second walk, but at the expense of all the extra
> loop overhead.
>
> My suggestion requires 5 loads (assuming the maximum of 5 levels of lookup).
> Personally I think this is probably acceptable? Perhaps we need some other
> voices here.

Hello all,

I am starting to implement vmalloc-huge by default with BBML2 no-abort on arm64.
I see that there is some disagreement related to the way the splitting needs to
be implemented - I skimmed through the discussions and it will require some work
to understand what is going on :) hopefully I'll be back soon to give some of
my opinions.

>
>
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 4 months, 3 weeks ago

On 7/23/25 10:38 AM, Dev Jain wrote:
>
> On 23/06/25 6:56 pm, Ryan Roberts wrote:
>> [...]
>>
>>>> +
>>>> +int split_leaf_mapping(unsigned long addr)
>>> Thanks for coming up with the code. It does help to understand your 
>>> idea. Now I
>>> see why you suggested "split_mapping(start); split_mapping(end);" 
>>> model. It does
>>> make the implementation easier because we don't need a loop anymore. 
>>> But this
>>> may have a couple of problems:
>>>    1. We need walk the page table twice instead of once. It sounds 
>>> expensive.
>> Yes we need to walk twice. That may be more expensive or less expensive,
>> depending on the size of the range that you are splitting. If the 
>> range is large
>> then your approach loops through every leaf mapping between the start 
>> and end
>> which will be more expensive than just doing 2 walks. If the range is 
>> small then
>> your approach can avoid the second walk, but at the expense of all 
>> the extra
>> loop overhead.
>>
>> My suggestion requires 5 loads (assuming the maximum of 5 levels of 
>> lookup).
>> Personally I think this is probably acceptable? Perhaps we need some 
>> other
>> voices here.
>
> Hello all,
>
> I am starting to implement vmalloc-huge by default with BBML2 no-abort 
> on arm64.
> I see that there is some disagreement related to the way the splitting 
> needs to
> be implemented - I skimmed through the discussions and it will require 
> some work
> to understand what is going on :) hopefully I'll be back soon to give 
> some of
> my opinions.

Hi Dev,

Thanks for the heads up.

In the last email I suggested skipping the leaf mappings in the split 
range in order to reduce page table walk overhead for 
split_mapping(start, end). In this way we can achieve:
     - reuse most of the split code for repainting (we just need the 
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS flags for repainting to split the 
page table down to PTEs - see the sketch below)
     - walk the page table only once
     - have similar page table walk overhead to 
split_mapping(start)/split_mapping(end) if the split range is large
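
For illustration only, the two call sites would then look roughly like 
the sketch below. The split_kernel_pgtable_mapping() name is just the 
one Ryan suggested, and the flags argument is an assumption rather than 
the final interface:

/* Permission change: only the boundaries of [start, end) need to land
 * on leaf edges; leaf mappings fully contained in the range are
 * skipped. */
split_kernel_pgtable_mapping(start, end, 0);

/* Repainting: force every leaf in the range down to PTE size. */
split_kernel_pgtable_mapping(start, end,
			     NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);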

I'm basically done with a new spin that implements this and addresses 
all the review comments from v4. I should be able to post the new spin 
by the end of this week.

Regards,
Yang

>
>>
>>

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Dev Jain 4 months, 3 weeks ago
On 24/07/25 2:21 am, Yang Shi wrote:
>
>
> On 7/23/25 10:38 AM, Dev Jain wrote:
>>
>> On 23/06/25 6:56 pm, Ryan Roberts wrote:
>>> [...]
>>>
>>>>> +
>>>>> +int split_leaf_mapping(unsigned long addr)
>>>> Thanks for coming up with the code. It does help to understand your 
>>>> idea. Now I
>>>> see why you suggested "split_mapping(start); split_mapping(end);" 
>>>> model. It does
>>>> make the implementation easier because we don't need a loop 
>>>> anymore. But this
>>>> may have a couple of problems:
>>>>    1. We need walk the page table twice instead of once. It sounds 
>>>> expensive.
>>> Yes we need to walk twice. That may be more expensive or less 
>>> expensive,
>>> depending on the size of the range that you are splitting. If the 
>>> range is large
>>> then your approach loops through every leaf mapping between the 
>>> start and end
>>> which will be more expensive than just doing 2 walks. If the range 
>>> is small then
>>> your approach can avoid the second walk, but at the expense of all 
>>> the extra
>>> loop overhead.
>>>
>>> My suggestion requires 5 loads (assuming the maximum of 5 levels of 
>>> lookup).
>>> Personally I think this is probably acceptable? Perhaps we need some 
>>> other
>>> voices here.
>>
>> Hello all,
>>
>> I am starting to implement vmalloc-huge by default with BBML2 
>> no-abort on arm64.
>> I see that there is some disagreement related to the way the 
>> splitting needs to
>> be implemented - I skimmed through the discussions and it will 
>> require some work
>> to understand what is going on :) hopefully I'll be back soon to give 
>> some of
>> my opinions.
>
> Hi Dev,
>
> Thanks for the heads up.
>
> In the last email I suggested skip the leaf mappings in the split 
> range in order to reduce page table walk overhead for 
> split_mapping(start, end). In this way we can achieve:
>     - reuse the most split code for repainting (just need 
> NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS flag for repainting to split page 
> table to PTEs)
>     - just walk page table once
>     - have similar page table walk overhead with 
> split_mapping(start)/split_mapping(end) if the split range is large
>
> I'm basically done on a new spin to implement it and solve all the 
> review comments from v4. I should be able to post the new spin by the 
> end of this week.

Great! As Catalin notes on my huge-perm change series, that series 
doesn't have any user, so it does not make sense for it to go in 
without your series - can you merge that series into your work for the 
new version?


>
> Regards,
> Yang
>
>>
>>>
>>>
>
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 4 months, 3 weeks ago

On 7/24/25 4:43 AM, Dev Jain wrote:
>
> On 24/07/25 2:21 am, Yang Shi wrote:
>>
>>
>> On 7/23/25 10:38 AM, Dev Jain wrote:
>>>
>>> On 23/06/25 6:56 pm, Ryan Roberts wrote:
>>>> [...]
>>>>
>>>>>> +
>>>>>> +int split_leaf_mapping(unsigned long addr)
>>>>> Thanks for coming up with the code. It does help to understand 
>>>>> your idea. Now I
>>>>> see why you suggested "split_mapping(start); split_mapping(end);" 
>>>>> model. It does
>>>>> make the implementation easier because we don't need a loop 
>>>>> anymore. But this
>>>>> may have a couple of problems:
>>>>>    1. We need walk the page table twice instead of once. It sounds 
>>>>> expensive.
>>>> Yes we need to walk twice. That may be more expensive or less 
>>>> expensive,
>>>> depending on the size of the range that you are splitting. If the 
>>>> range is large
>>>> then your approach loops through every leaf mapping between the 
>>>> start and end
>>>> which will be more expensive than just doing 2 walks. If the range 
>>>> is small then
>>>> your approach can avoid the second walk, but at the expense of all 
>>>> the extra
>>>> loop overhead.
>>>>
>>>> My suggestion requires 5 loads (assuming the maximum of 5 levels of 
>>>> lookup).
>>>> Personally I think this is probably acceptable? Perhaps we need 
>>>> some other
>>>> voices here.
>>>
>>> Hello all,
>>>
>>> I am starting to implement vmalloc-huge by default with BBML2 
>>> no-abort on arm64.
>>> I see that there is some disagreement related to the way the 
>>> splitting needs to
>>> be implemented - I skimmed through the discussions and it will 
>>> require some work
>>> to understand what is going on :) hopefully I'll be back soon to 
>>> give some of
>>> my opinions.
>>
>> Hi Dev,
>>
>> Thanks for the heads up.
>>
>> In the last email I suggested skip the leaf mappings in the split 
>> range in order to reduce page table walk overhead for 
>> split_mapping(start, end). In this way we can achieve:
>>     - reuse the most split code for repainting (just need 
>> NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS flag for repainting to split 
>> page table to PTEs)
>>     - just walk page table once
>>     - have similar page table walk overhead with 
>> split_mapping(start)/split_mapping(end) if the split range is large
>>
>> I'm basically done on a new spin to implement it and solve all the 
>> review comments from v4. I should be able to post the new spin by the 
>> end of this week.
>
> Great! As Catalin notes on my huge-perm change series, that series 
> doesn't have any user so it does not make sense for that to go in 
> without your
>
> series - can you merge that series into your work for the new version?

Yeah, sure. But I saw Andrew had some nits on the generic mm code part. 
Alternatively, you could have your patches applied on top of my series 
so that we don't block each other. Anyway, I will still keep it as a 
prerequisite of my series for now.

Yang

>
>
>>
>> Regards,
>> Yang
>>
>>>
>>>>
>>>>
>>

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 5 months, 3 weeks ago

On 6/23/25 6:26 AM, Ryan Roberts wrote:
> [...]
>
>>> +
>>> +int split_leaf_mapping(unsigned long addr)
>> Thanks for coming up with the code. It does help to understand your idea. Now I
>> see why you suggested "split_mapping(start); split_mapping(end);" model. It does
>> make the implementation easier because we don't need a loop anymore. But this
>> may have a couple of problems:
>>    1. We need walk the page table twice instead of once. It sounds expensive.
> Yes we need to walk twice. That may be more expensive or less expensive,
> depending on the size of the range that you are splitting. If the range is large
> then your approach loops through every leaf mapping between the start and end
> which will be more expensive than just doing 2 walks. If the range is small then
> your approach can avoid the second walk, but at the expense of all the extra
> loop overhead.

Thinking about this further: although there is some extra loop 
overhead, there should be no extra loads. We can check whether the 
start and end are properly aligned; if they are, we just continue the 
loop without loading the page table entry.

And we can optimize the loop by advancing multiple PUD/PMD/CONT sizes 
at a time instead of one at a time. The pseudo code (at pmd level, for 
example) looks like:

do {
      nr = 1;
      next = pmd_addr_end(start, end);

      if (next < end)
          nr = ((end - next) / PMD_SIZE) + 1;

      if (((start | next) & ~PMD_MASK) == 0)
          continue;

      split_pmd(start, next);
} while (pmdp += nr, start = next + (nr - 1) * PMD_SIZE, start != end)


For repainting case, we just need do:

do {
      nr = 1;
      next = pmd_addr_end(start, end);

      if (next < end && !repainting)
          nr = ((end - next) / PMD_SIZE) + 1;

      if (((start | next) & ~PMD_MASK) == 0 && !repainting)
          continue;

      split_pmd(start, next);
} while (pmdp += nr, start = next + (nr - 1) * PMD_SIZE, start != end)

This should reduce loop overhead and duplicate code for repainting.

Thanks,
Yang

>
> My suggestion requires 5 loads (assuming the maximum of 5 levels of lookup).
> Personally I think this is probably acceptable? Perhaps we need some other
> voices here.
>
>
>>    2. How should we handle repainting? We need split all the page tables all the
>> way down to PTE for repainting between start and end rather than keeping block
>> mappings. This model doesn't work, right? For example, repaint a 2G block. The
>> first 1G is mapped by a PUD, the second 1G is mapped by 511 PMD and 512 PTEs.
>> split_mapping(start) will split the first 1G, but split_mapping(end) will do
>> nothing, the 511 PMDs are kept intact. In addition, I think we also prefer reuse
>> the split primitive for repainting instead of inventing another one.
> I agree my approach doesn't work for the repainting case. But I think what I'm
> trying to say is that the 2 things are different operations;
> split_leaf_mapping() is just trying to ensure that the start and end of a ragion
> are on leaf boundaries. Repainting is trying to ensure that all leaf mappings
> within a range are PTE-size. I've implemented the former and you've implemented
> that latter. Your implementation looks like meets the former's requirements
> because you are only testing it for the case where the range is 1 page. But
> actually it is splitting everything in the range to PTEs.
>
> Thanks,
> Ryan
>
>> Thanks,
>> Yang
>>
>>> +{
>>> +    pgd_t *pgdp, pgd;
>>> +    p4d_t *p4dp, p4d;
>>> +    pud_t *pudp, pud;
>>> +    pmd_t *pmdp, pmd;
>>> +    pte_t *ptep, pte;
>>> +    int ret = 0;
>>> +
>>> +    /*
>>> +     * !BBML2_NOABORT systems should not be trying to change permissions on
>>> +     * anything that is not pte-mapped in the first place. Just return early
>>> +     * and let the permission change code raise a warning if not already
>>> +     * pte-mapped.
>>> +     */
>>> +    if (!system_supports_bbml2_noabort())
>>> +        return 0;
>>> +
>>> +    /*
>>> +     * Ensure addr is at least page-aligned since this is the finest
>>> +     * granularity we can split to.
>>> +     */
>>> +    if (addr != PAGE_ALIGN(addr))
>>> +        return -EINVAL;
>>> +
>>> +    arch_enter_lazy_mmu_mode();
>>> +
>>> +    /*
>>> +     * PGD: If addr is PGD aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr)
>>> +        goto out;
>>> +    pgdp = pgd_offset_k(addr);
>>> +    pgd = pgdp_get(pgdp);
>>> +    if (!pgd_present(pgd))
>>> +        goto out;
>>> +
>>> +    /*
>>> +     * P4D: If addr is P4D aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, P4D_SIZE) == addr)
>>> +        goto out;
>>> +    p4dp = p4d_offset(pgdp, addr);
>>> +    p4d = p4dp_get(p4dp);
>>> +    if (!p4d_present(p4d))
>>> +        goto out;
>>> +
>>> +    /*
>>> +     * PUD: If addr is PUD aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split. Otherwise,
>>> +     * if we have a pud leaf, split to contpmd.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
>>> +        goto out;
>>> +    pudp = pud_offset(p4dp, addr);
>>> +    pud = pudp_get(pudp);
>>> +    if (!pud_present(pud))
>>> +        goto out;
>>> +    if (pud_leaf(pud)) {
>>> +        ret = split_pud(pudp, pud);
>>> +        if (ret)
>>> +            goto out;
>>> +    }
>>> +
>>> +    /*
>>> +     * CONTPMD: If addr is CONTPMD aligned then addr already describes a
>>> +     * leaf boundary. If not present then there is nothing to split.
>>> +     * Otherwise, if we have a contpmd leaf, split to pmd.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>>> +        goto out;
>>> +    pmdp = pmd_offset(pudp, addr);
>>> +    pmd = pmdp_get(pmdp);
>>> +    if (!pmd_present(pmd))
>>> +        goto out;
>>> +    if (pmd_leaf(pmd)) {
>>> +        if (pmd_cont(pmd))
>>> +            split_contpmd(pmdp);
>>> +        /*
>>> +         * PMD: If addr is PMD aligned then addr already describes a
>>> +         * leaf boundary. Otherwise, split to contpte.
>>> +         */
>>> +        if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
>>> +            goto out;
>>> +        ret = split_pmd(pmdp, pmd);
>>> +        if (ret)
>>> +            goto out;
>>> +    }
>>> +
>>> +    /*
>>> +     * CONTPTE: If addr is CONTPTE aligned then addr already describes a
>>> +     * leaf boundary. If not present then there is nothing to split.
>>> +     * Otherwise, if we have a contpte leaf, split to pte.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>>> +        goto out;
>>> +    ptep = pte_offset_kernel(pmdp, addr);
>>> +    pte = __ptep_get(ptep);
>>> +    if (!pte_present(pte))
>>> +        goto out;
>>> +    if (pte_cont(pte))
>>> +        split_contpte(ptep);
>>> +
>>> +out:
>>> +    arch_leave_lazy_mmu_mode();
>>> +    return ret;
>>> +}
>>> ---8<---
>>>
>>> Thanks,
>>> Ryan
>>>

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 5 months, 3 weeks ago

On 6/23/25 6:26 AM, Ryan Roberts wrote:
> [...]
>
>>> +
>>> +int split_leaf_mapping(unsigned long addr)
>> Thanks for coming up with the code. It does help to understand your idea. Now I
>> see why you suggested "split_mapping(start); split_mapping(end);" model. It does
>> make the implementation easier because we don't need a loop anymore. But this
>> may have a couple of problems:
>>    1. We need walk the page table twice instead of once. It sounds expensive.
> Yes we need to walk twice. That may be more expensive or less expensive,
> depending on the size of the range that you are splitting. If the range is large
> then your approach loops through every leaf mapping between the start and end
> which will be more expensive than just doing 2 walks. If the range is small then
> your approach can avoid the second walk, but at the expense of all the extra
> loop overhead.

Yes, it depends on the page table layout (the more fragmented, the more 
loads) and the range passed in by the callers. But AFAICT, most 
existing callers just change permissions on a per-page basis. I know 
you are looking at adding more block/cont mapping support for vmalloc, 
but will the large-range case dominate?

>
> My suggestion requires 5 loads (assuming the maximum of 5 levels of lookup).
> Personally I think this is probably acceptable? Perhaps we need some other
> voices here.

Doesn't it require 10 loads for both start and end together? The 5 loads 
for end may be fast since they are likely cached if they fall into the 
same PGD/P4D/PUD/PMD.

>
>
>>    2. How should we handle repainting? We need split all the page tables all the
>> way down to PTE for repainting between start and end rather than keeping block
>> mappings. This model doesn't work, right? For example, repaint a 2G block. The
>> first 1G is mapped by a PUD, the second 1G is mapped by 511 PMD and 512 PTEs.
>> split_mapping(start) will split the first 1G, but split_mapping(end) will do
>> nothing, the 511 PMDs are kept intact. In addition, I think we also prefer reuse
>> the split primitive for repainting instead of inventing another one.
> I agree my approach doesn't work for the repainting case. But I think what I'm
> trying to say is that the 2 things are different operations;
> split_leaf_mapping() is just trying to ensure that the start and end of a ragion
> are on leaf boundaries. Repainting is trying to ensure that all leaf mappings
> within a range are PTE-size. I've implemented the former and you've implemented
> that latter. Your implementation looks like meets the former's requirements
> because you are only testing it for the case where the range is 1 page. But
> actually it is splitting everything in the range to PTEs.

I can understand why you see them as two different operations, and 
repainting is basically a one-off thing. However, they share a lot of 
common logic (for example, allocating page tables, populating new page 
table entries, etc.) from a code point of view. From this perspective, 
repainting is just a special case of split (no block and no cont 
mappings). If we implement them separately, I can see there will be a 
lot of duplicate code. I'm not sure whether that is preferred or not.

Thanks,
Yang

>
> Thanks,
> Ryan
>
>> Thanks,
>> Yang
>>
>>> +{
>>> +    pgd_t *pgdp, pgd;
>>> +    p4d_t *p4dp, p4d;
>>> +    pud_t *pudp, pud;
>>> +    pmd_t *pmdp, pmd;
>>> +    pte_t *ptep, pte;
>>> +    int ret = 0;
>>> +
>>> +    /*
>>> +     * !BBML2_NOABORT systems should not be trying to change permissions on
>>> +     * anything that is not pte-mapped in the first place. Just return early
>>> +     * and let the permission change code raise a warning if not already
>>> +     * pte-mapped.
>>> +     */
>>> +    if (!system_supports_bbml2_noabort())
>>> +        return 0;
>>> +
>>> +    /*
>>> +     * Ensure addr is at least page-aligned since this is the finest
>>> +     * granularity we can split to.
>>> +     */
>>> +    if (addr != PAGE_ALIGN(addr))
>>> +        return -EINVAL;
>>> +
>>> +    arch_enter_lazy_mmu_mode();
>>> +
>>> +    /*
>>> +     * PGD: If addr is PGD aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr)
>>> +        goto out;
>>> +    pgdp = pgd_offset_k(addr);
>>> +    pgd = pgdp_get(pgdp);
>>> +    if (!pgd_present(pgd))
>>> +        goto out;
>>> +
>>> +    /*
>>> +     * P4D: If addr is P4D aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, P4D_SIZE) == addr)
>>> +        goto out;
>>> +    p4dp = p4d_offset(pgdp, addr);
>>> +    p4d = p4dp_get(p4dp);
>>> +    if (!p4d_present(p4d))
>>> +        goto out;
>>> +
>>> +    /*
>>> +     * PUD: If addr is PUD aligned then addr already describes a leaf
>>> +     * boundary. If not present then there is nothing to split. Otherwise,
>>> +     * if we have a pud leaf, split to contpmd.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
>>> +        goto out;
>>> +    pudp = pud_offset(p4dp, addr);
>>> +    pud = pudp_get(pudp);
>>> +    if (!pud_present(pud))
>>> +        goto out;
>>> +    if (pud_leaf(pud)) {
>>> +        ret = split_pud(pudp, pud);
>>> +        if (ret)
>>> +            goto out;
>>> +    }
>>> +
>>> +    /*
>>> +     * CONTPMD: If addr is CONTPMD aligned then addr already describes a
>>> +     * leaf boundary. If not present then there is nothing to split.
>>> +     * Otherwise, if we have a contpmd leaf, split to pmd.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>>> +        goto out;
>>> +    pmdp = pmd_offset(pudp, addr);
>>> +    pmd = pmdp_get(pmdp);
>>> +    if (!pmd_present(pmd))
>>> +        goto out;
>>> +    if (pmd_leaf(pmd)) {
>>> +        if (pmd_cont(pmd))
>>> +            split_contpmd(pmdp);
>>> +        /*
>>> +         * PMD: If addr is PMD aligned then addr already describes a
>>> +         * leaf boundary. Otherwise, split to contpte.
>>> +         */
>>> +        if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
>>> +            goto out;
>>> +        ret = split_pmd(pmdp, pmd);
>>> +        if (ret)
>>> +            goto out;
>>> +    }
>>> +
>>> +    /*
>>> +     * CONTPTE: If addr is CONTPTE aligned then addr already describes a
>>> +     * leaf boundary. If not present then there is nothing to split.
>>> +     * Otherwise, if we have a contpte leaf, split to pte.
>>> +     */
>>> +    if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr)
>>> +        goto out;
>>> +    ptep = pte_offset_kernel(pmdp, addr);
>>> +    pte = __ptep_get(ptep);
>>> +    if (!pte_present(pte))
>>> +        goto out;
>>> +    if (pte_cont(pte))
>>> +        split_contpte(ptep);
>>> +
>>> +out:
>>> +    arch_leave_lazy_mmu_mode();
>>> +    return ret;
>>> +}
>>> ---8<---
>>>
>>> Thanks,
>>> Ryan
>>>

Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Ryan Roberts 6 months ago
On 31/05/2025 03:41, Yang Shi wrote:
> When rodata=full is specified, kernel linear mapping has to be mapped at
> PTE level since large page table can't be split due to break-before-make
> rule on ARM64.
> 
> This resulted in a couple of problems:
>   - performance degradation
>   - more TLB pressure
>   - memory waste for kernel page table
> 
> With FEAT_BBM level 2 support, splitting large block page table to
> smaller ones doesn't need to make the page table entry invalid anymore.
> This allows kernel split large block mapping on the fly.
> 
> Add kernel page table split support and use large block mapping by
> default when FEAT_BBM level 2 is supported for rodata=full.  When
> changing permissions for kernel linear mapping, the page table will be
> split to smaller size.
> 
> The machine without FEAT_BBM level 2 will fallback to have kernel linear
> mapping PTE-mapped when rodata=full.
> 
> With this we saw significant performance boost with some benchmarks and
> much less memory consumption on my AmpereOne machine (192 cores, 1P) with
> 256GB memory.
> 
> * Memory use after boot
> Before:
> MemTotal:       258988984 kB
> MemFree:        254821700 kB
> 
> After:
> MemTotal:       259505132 kB
> MemFree:        255410264 kB
> 
> Around 500MB more memory are free to use.  The larger the machine, the
> more memory saved.
> 
> * Memcached
> We saw performance degradation when running Memcached benchmark with
> rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
> With this patchset we saw ops/sec is increased by around 3.5%, P99
> latency is reduced by around 9.6%.
> The gain mainly came from reduced kernel TLB misses.  The kernel TLB
> MPKI is reduced by 28.5%.
> 
> The benchmark data is now on par with rodata=on too.
> 
> * Disk encryption (dm-crypt) benchmark
> Ran fio benchmark with the below command on a 128G ramdisk (ext4) with disk
> encryption (by dm-crypt).
> fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
>     --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
>     --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
>     --name=iops-test-job --eta-newline=1 --size 100G
> 
> The IOPS is increased by 90% - 150% (the variance is high, but the worst
> number of good case is around 90% more than the best number of bad case).
> The bandwidth is increased and the avg clat is reduced proportionally.
> 
> * Sequential file read
> Read 100G file sequentially on XFS (xfs_io read with page cache populated).
> The bandwidth is increased by 150%.
> 
> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
> ---
>  arch/arm64/include/asm/cpufeature.h |  26 +++
>  arch/arm64/include/asm/mmu.h        |   1 +
>  arch/arm64/include/asm/pgtable.h    |  12 +-
>  arch/arm64/kernel/cpufeature.c      |   2 +-
>  arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
>  arch/arm64/mm/pageattr.c            |  37 +++-
>  6 files changed, 319 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index 8f36ffa16b73..a95806980298 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
>  #endif
>  }
>  
> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr);
> +
> +static inline bool has_nobbml2_override(void)
> +{
> +	u64 mmfr2;
> +	unsigned int bbm;
> +
> +	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
> +	mmfr2 &= ~id_aa64mmfr2_override.mask;
> +	mmfr2 |= id_aa64mmfr2_override.val;
> +	bbm = cpuid_feature_extract_unsigned_field(mmfr2,
> +						   ID_AA64MMFR2_EL1_BBM_SHIFT);
> +	return bbm == 0;
> +}
> +
> +/*
> + * Called at early boot stage on boot CPU before cpu info and cpu feature
> + * are ready.
> + */
> +static inline bool bbml2_noabort_available(void)
> +{
> +	return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
> +	       cpu_has_bbml2_noabort(read_cpuid_id()) &&
> +	       !has_nobbml2_override();

Based on Will's feedback, the Kconfig and the cmdline override will both
disappear in Miko's next version and we will only use the MIDR list to decide
BBML2_NOABORT status, so this will simplify significantly. Sorry about the churn
here.

> +}
> +
>  #endif /* __ASSEMBLY__ */
>  
>  #endif
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 6e8aa8e72601..2693d63bf837 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -71,6 +71,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  			       pgprot_t prot, bool page_mappings_only);
>  extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>  extern void mark_linear_text_alias_ro(void);
> +extern int split_linear_mapping(unsigned long start, unsigned long end);

nit: Perhaps split_leaf_mapping() or split_kernel_pgtable_mapping() or something
similar would be more generic, which will benefit us in future when using this
for vmalloc too?

>  
>  /*
>   * This check is triggered during the early boot before the cpufeature
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index d3b538be1500..bf3cef31d243 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -293,6 +293,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
>  	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
>  }
>  
> +static inline pmd_t pmd_mknoncont(pmd_t pmd)
> +{
> +	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
> +}
> +
>  static inline pte_t pte_mkdevmap(pte_t pte)
>  {
>  	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
> @@ -769,7 +774,7 @@ static inline bool in_swapper_pgdir(void *addr)
>  	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
>  }
>  
> -static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
> +static inline void __set_pmd_nosync(pmd_t *pmdp, pmd_t pmd)
>  {
>  #ifdef __PAGETABLE_PMD_FOLDED
>  	if (in_swapper_pgdir(pmdp)) {
> @@ -779,6 +784,11 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>  #endif /* __PAGETABLE_PMD_FOLDED */
>  
>  	WRITE_ONCE(*pmdp, pmd);
> +}
> +
> +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
> +{
> +	__set_pmd_nosync(pmdp, pmd);
>  
>  	if (pmd_valid(pmd)) {
>  		dsb(ishst);
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index e879bfcf853b..5fc2a4a804de 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -2209,7 +2209,7 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>  	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>  }
>  
> -static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>  {
>  	/*
>  	 * We want to allow usage of bbml2 in as wide a range of kernel contexts


[...] I'll send a separate response for the mmu.c table walker changes.

>  
> +int split_linear_mapping(unsigned long start, unsigned long end)
> +{
> +	int ret = 0;
> +
> +	if (!system_supports_bbml2_noabort())
> +		return 0;

Hmm... I guess the thinking here is that for !BBML2_NOABORT you are expecting
this function should only be called in the first place if we know we are
pte-mapped. So I guess this is ok... it just means that if we are not
pte-mapped, warnings will be emitted while walking the pgtables (as is the case
today). So I think this approach is ok.

> +
> +	mmap_write_lock(&init_mm);

What is the lock protecting? I was originally thinking no locking should be
needed because it's not needed for permission changes today; but I think you are
right here and we do need locking; multiple owners could share a large leaf
mapping, I guess? And in that case you could get concurrent attempts to split
from both owners.

I'm not really a fan of adding the extra locking though; it might introduce a
new bottleneck. I wonder if there is a way we could do this locklessly? i.e.
allocate the new table, then cmpxchg to insert and the loser has to free? That
doesn't work for contiguous mappings though...
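
For illustration, the lockless insert could look something like the
sketch below (untested; the cmpxchg on the raw pmd value and the
pagetable_free()/virt_to_ptdesc() cleanup are assumptions, and the
caller is assumed to have already populated the new PTE table and
issued the dsb(ishst)):

static int split_pmd_lockless(pmd_t *pmdp, pmd_t old_pmd,
			      phys_addr_t pte_phys, pmdval_t tableprot)
{
	pmdval_t old = pmd_val(old_pmd);
	pmdval_t new = __phys_to_pmd_val(pte_phys) | tableprot;

	/*
	 * Only install the new PTE table if the pmd still contains the
	 * block entry we split from. If someone else raced and won,
	 * their table is already in place; free ours and let the
	 * caller re-read the pmd and retry.
	 */
	if (cmpxchg_relaxed(&pmd_val(*pmdp), old, new) != old) {
		pagetable_free(virt_to_ptdesc(phys_to_virt(pte_phys)));
		return -EAGAIN;
	}

	return 0;
}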

> +	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
> +	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
> +					  start, (end - start), __pgprot(0),
> +					  __pgd_pgtable_alloc,
> +					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
> +	mmap_write_unlock(&init_mm);
> +	flush_tlb_kernel_range(start, end);

I don't believe we should need to flush the TLB when only changing entry sizes
when BBML2 is supported. Miko's series has a massive comment explaining the
reasoning. That applies to user space though. We should consider if this all
works safely for kernel space too, and hopefully remove the flush.

> +
> +	return ret;
> +}
> +
>  /*
>   * This function can only be used to modify existing table entries,
>   * without allocating new levels of table. Note that this permits the
> @@ -676,6 +887,24 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) {
>  
>  #endif /* CONFIG_KFENCE */
>  
> +static inline bool force_pte_mapping(void)
> +{
> +	/*
> +	 * Can't use cpufeature API to determine whether BBML2 supported
> +	 * or not since cpufeature have not been finalized yet.
> +	 *
> +	 * Checking the boot CPU only for now.  If the boot CPU has
> +	 * BBML2, paint linear mapping with block mapping.  If it turns
> +	 * out the secondary CPUs don't support BBML2 once cpufeature is
> +	 * fininalized, the linear mapping will be repainted with PTE
> +	 * mapping.
> +	 */
> +	return (rodata_full && !bbml2_noabort_available()) ||

So this is the case where we don't have BBML2 and need to modify protections at
page granularity - I agree we need to force pte mappings here.

> +		debug_pagealloc_enabled() ||

This is the case where every page is made invalid on free and valid on
allocation, so there is no point in having block mappings; they would soon
degenerate into page mappings anyway because we would have to split on every
allocation. Agree here too.

> +		arm64_kfence_can_set_direct_map() ||

After looking into how kfence works, I don't agree with this one. It has a
dedicated pool where it allocates from. That pool may be allocated early by the
arch or may be allocated late by the core code. Either way, kfence will only
modify protections within that pool. Your current approach is forcing pte
mappings if the pool allocation is late (i.e. not performed by the arch code
during boot). But I think "late" is the most common case; kfence is compiled
into the kernel but not active at boot. Certainly that's how my Ubuntu kernel is
configured. So I think we should just ignore kfence here. If it's "early" then
we map the pool with page granularity (as an optimization). If it's "late" your
splitter will degenerate the whole kfence pool to page mappings over time as
kfence_protect_page() -> set_memory_valid() is called. But the bulk of the
linear map will remain mapped with large blocks.

> +		is_realm_world();

I think the only reason this requires pte mappings is for
__set_memory_enc_dec(). But that can now deal with block mappings given the
ability to split the mappings as needed. So I think this condition can be
removed too.

> +}

Additionally, for can_set_direct_map(): at minimum its comment should be tidied
up, but really I think it should return true if "BBML2_NOABORT ||
force_pte_mapping()". Because they are the conditions under which we can now
safely modify the linear map.
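
i.e. something along the lines of the below sketch (reusing the helpers from
this patch, and ignoring which file force_pte_mapping() would need to live in):

	bool can_set_direct_map(void)
	{
		/*
		 * The linear map can be safely modified either when BBML2
		 * lets us split block mappings on the fly, or when it was
		 * forced to be pte-mapped in the first place.
		 */
		return system_supports_bbml2_noabort() || force_pte_mapping();
	}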

> +
>  static void __init map_mem(pgd_t *pgdp)
>  {
>  	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
> @@ -701,7 +930,7 @@ static void __init map_mem(pgd_t *pgdp)
>  
>  	early_kfence_pool = arm64_kfence_alloc_pool();
>  
> -	if (can_set_direct_map())
> +	if (force_pte_mapping())
>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>  
>  	/*
> @@ -1402,7 +1631,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>  
>  	VM_BUG_ON(!mhp_range_allowed(start, size, true));
>  
> -	if (can_set_direct_map())
> +	if (force_pte_mapping())
>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>  
>  	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 39fd1f7ff02a..25c068712cb5 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -10,6 +10,7 @@
>  #include <linux/vmalloc.h>
>  
>  #include <asm/cacheflush.h>
> +#include <asm/mmu.h>
>  #include <asm/pgtable-prot.h>
>  #include <asm/set_memory.h>
>  #include <asm/tlbflush.h>
> @@ -42,6 +43,8 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>  	struct page_change_data *cdata = data;
>  	pte_t pte = __ptep_get(ptep);
>  
> +	BUG_ON(pte_cont(pte));

I don't think this is required; We want to enable using contiguous mappings
where it makes sense. As long as we have BBML2, we can update contiguous pte
mappings in place, as long as we update all of the ptes in the contiguous block.
split_linear_mapping() should either have converted to non-cont mappings if the
contiguous block straddled the split point, or left it as is (or downgraded a
PMD-block to a contpte block) if it was fully contained within the split range.
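
For the straddling case, the splitter would do something roughly like the below
(sketch only, assuming BBML2 and assuming ptep already points at the first pte
of the contpte block):

	int i;

	/*
	 * Repaint the whole contpte block without the CONT bit before
	 * changing permissions on a subset of it; with BBML2 this doesn't
	 * need to go via an invalid entry.
	 */
	for (i = 0; i < CONT_PTES; i++)
		WRITE_ONCE(ptep[i], pte_mknoncont(__ptep_get(ptep + i)));
	dsb(ishst);
	isb();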

> +
>  	pte = clear_pte_bit(pte, cdata->clear_mask);
>  	pte = set_pte_bit(pte, cdata->set_mask);
>  
> @@ -80,8 +83,9 @@ static int change_memory_common(unsigned long addr, int numpages,
>  	unsigned long start = addr;
>  	unsigned long size = PAGE_SIZE * numpages;
>  	unsigned long end = start + size;
> +	unsigned long l_start;
>  	struct vm_struct *area;
> -	int i;
> +	int i, ret;
>  
>  	if (!PAGE_ALIGNED(addr)) {
>  		start &= PAGE_MASK;
> @@ -118,7 +122,12 @@ static int change_memory_common(unsigned long addr, int numpages,
>  	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
>  			    pgprot_val(clear_mask) == PTE_RDONLY)) {
>  		for (i = 0; i < area->nr_pages; i++) {
> -			__change_memory_common((u64)page_address(area->pages[i]),
> +			l_start = (u64)page_address(area->pages[i]);
> +			ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
> +			if (WARN_ON_ONCE(ret))
> +				return ret;

I don't think this is the right place to integrate; I think the split should be
done inside __change_memory_common(). Then it caters to all possibilities (i.e.
set_memory_valid() and __set_memory_enc_dec()). This means it will run for
vmalloc too, but for now, that will be a nop because everything should already
be split as required on entry and in future we will get that for free.

Once you have integrated Dev's series, the hook becomes
___change_memory_common() (3 underscores)...
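
i.e. roughly the below shape for now (sketch only; the existing pte walk is
kept and the current TLB maintenance is condensed to a comment):

	static int __change_memory_common(unsigned long start, unsigned long size,
					  pgprot_t set_mask, pgprot_t clear_mask)
	{
		struct page_change_data data = {
			.set_mask = set_mask,
			.clear_mask = clear_mask,
		};
		int ret;

		/*
		 * Split any block mappings covering the range first; this is
		 * a nop when the range is already pte-mapped (e.g. vmalloc
		 * today).
		 */
		ret = split_linear_mapping(start, start + size);
		if (WARN_ON_ONCE(ret))
			return ret;

		ret = apply_to_page_range(&init_mm, start, size,
					  change_page_range, &data);

		/* existing TLB maintenance stays as it is today */

		return ret;
	}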

> +
> +			__change_memory_common(l_start,
>  					       PAGE_SIZE, set_mask, clear_mask);
>  		}
>  	}
> @@ -174,6 +183,9 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>  
>  int set_direct_map_invalid_noflush(struct page *page)
>  {
> +	unsigned long l_start;
> +	int ret;
> +
>  	struct page_change_data data = {
>  		.set_mask = __pgprot(0),
>  		.clear_mask = __pgprot(PTE_VALID),
> @@ -182,13 +194,21 @@ int set_direct_map_invalid_noflush(struct page *page)
>  	if (!can_set_direct_map())
>  		return 0;
>  
> +	l_start = (unsigned long)page_address(page);
> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
> +	if (WARN_ON_ONCE(ret))
> +		return ret;
> +
>  	return apply_to_page_range(&init_mm,
> -				   (unsigned long)page_address(page),
> -				   PAGE_SIZE, change_page_range, &data);
> +				   l_start, PAGE_SIZE, change_page_range,
> +				   &data);

...and once integrated with Dev's series you don't need any changes here...

>  }
>  
>  int set_direct_map_default_noflush(struct page *page)
>  {
> +	unsigned long l_start;
> +	int ret;
> +
>  	struct page_change_data data = {
>  		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>  		.clear_mask = __pgprot(PTE_RDONLY),
> @@ -197,9 +217,14 @@ int set_direct_map_default_noflush(struct page *page)
>  	if (!can_set_direct_map())
>  		return 0;
>  
> +	l_start = (unsigned long)page_address(page);
> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
> +	if (WARN_ON_ONCE(ret))
> +		return ret;
> +
>  	return apply_to_page_range(&init_mm,
> -				   (unsigned long)page_address(page),
> -				   PAGE_SIZE, change_page_range, &data);
> +				   l_start, PAGE_SIZE, change_page_range,
> +				   &data);

...or here.

Thanks,
Ryan

>  }
>  
>  static int __set_memory_enc_dec(unsigned long addr,
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Ryan Roberts 6 months ago
On 16/06/2025 12:58, Ryan Roberts wrote:
> On 31/05/2025 03:41, Yang Shi wrote:
>> When rodata=full is specified, kernel linear mapping has to be mapped at
>> PTE level since large page table can't be split due to break-before-make
>> rule on ARM64.
>>
>> This resulted in a couple of problems:
>>   - performance degradation
>>   - more TLB pressure
>>   - memory waste for kernel page table
>>
>> With FEAT_BBM level 2 support, splitting large block page table to
>> smaller ones doesn't need to make the page table entry invalid anymore.
>> This allows kernel split large block mapping on the fly.
>>
>> Add kernel page table split support and use large block mapping by
>> default when FEAT_BBM level 2 is supported for rodata=full.  When
>> changing permissions for kernel linear mapping, the page table will be
>> split to smaller size.
>>
>> The machine without FEAT_BBM level 2 will fallback to have kernel linear
>> mapping PTE-mapped when rodata=full.
>>
>> With this we saw significant performance boost with some benchmarks and
>> much less memory consumption on my AmpereOne machine (192 cores, 1P) with
>> 256GB memory.
>>
>> * Memory use after boot
>> Before:
>> MemTotal:       258988984 kB
>> MemFree:        254821700 kB
>>
>> After:
>> MemTotal:       259505132 kB
>> MemFree:        255410264 kB
>>
>> Around 500MB more memory are free to use.  The larger the machine, the
>> more memory saved.
>>
>> * Memcached
>> We saw performance degradation when running Memcached benchmark with
>> rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
>> With this patchset we saw ops/sec is increased by around 3.5%, P99
>> latency is reduced by around 9.6%.
>> The gain mainly came from reduced kernel TLB misses.  The kernel TLB
>> MPKI is reduced by 28.5%.
>>
>> The benchmark data is now on par with rodata=on too.
>>
>> * Disk encryption (dm-crypt) benchmark
>> Ran fio benchmark with the below command on a 128G ramdisk (ext4) with disk
>> encryption (by dm-crypt).
>> fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
>>     --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
>>     --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
>>     --name=iops-test-job --eta-newline=1 --size 100G
>>
>> The IOPS is increased by 90% - 150% (the variance is high, but the worst
>> number of good case is around 90% more than the best number of bad case).
>> The bandwidth is increased and the avg clat is reduced proportionally.
>>
>> * Sequential file read
>> Read 100G file sequentially on XFS (xfs_io read with page cache populated).
>> The bandwidth is increased by 150%.
>>
>> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
>> ---
>>  arch/arm64/include/asm/cpufeature.h |  26 +++
>>  arch/arm64/include/asm/mmu.h        |   1 +
>>  arch/arm64/include/asm/pgtable.h    |  12 +-
>>  arch/arm64/kernel/cpufeature.c      |   2 +-
>>  arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
>>  arch/arm64/mm/pageattr.c            |  37 +++-
>>  6 files changed, 319 insertions(+), 28 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
>> index 8f36ffa16b73..a95806980298 100644
>> --- a/arch/arm64/include/asm/cpufeature.h
>> +++ b/arch/arm64/include/asm/cpufeature.h
>> @@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
>>  #endif
>>  }
>>  
>> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr);
>> +
>> +static inline bool has_nobbml2_override(void)
>> +{
>> +	u64 mmfr2;
>> +	unsigned int bbm;
>> +
>> +	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
>> +	mmfr2 &= ~id_aa64mmfr2_override.mask;
>> +	mmfr2 |= id_aa64mmfr2_override.val;
>> +	bbm = cpuid_feature_extract_unsigned_field(mmfr2,
>> +						   ID_AA64MMFR2_EL1_BBM_SHIFT);
>> +	return bbm == 0;
>> +}
>> +
>> +/*
>> + * Called at early boot stage on boot CPU before cpu info and cpu feature
>> + * are ready.
>> + */
>> +static inline bool bbml2_noabort_available(void)
>> +{
>> +	return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
>> +	       cpu_has_bbml2_noabort(read_cpuid_id()) &&
>> +	       !has_nobbml2_override();
> 
> Based on Will's feedback, The Kconfig and the cmdline override will both
> disappear in Miko's next version and we will only use the MIDR list to decide
> BBML2_NOABORT status, so this will significantly simplify. Sorry about the churn
> here.
> 
>> +}
>> +
>>  #endif /* __ASSEMBLY__ */
>>  
>>  #endif
>> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
>> index 6e8aa8e72601..2693d63bf837 100644
>> --- a/arch/arm64/include/asm/mmu.h
>> +++ b/arch/arm64/include/asm/mmu.h
>> @@ -71,6 +71,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>>  			       pgprot_t prot, bool page_mappings_only);
>>  extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>>  extern void mark_linear_text_alias_ro(void);
>> +extern int split_linear_mapping(unsigned long start, unsigned long end);
> 
> nit: Perhaps split_leaf_mapping() or split_kernel_pgtable_mapping() or something
> similar is more generic which will benefit us in future when using this for
> vmalloc too?
> 
>>  
>>  /*
>>   * This check is triggered during the early boot before the cpufeature
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index d3b538be1500..bf3cef31d243 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -293,6 +293,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
>>  	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
>>  }
>>  
>> +static inline pmd_t pmd_mknoncont(pmd_t pmd)
>> +{
>> +	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
>> +}
>> +
>>  static inline pte_t pte_mkdevmap(pte_t pte)
>>  {
>>  	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
>> @@ -769,7 +774,7 @@ static inline bool in_swapper_pgdir(void *addr)
>>  	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
>>  }
>>  
>> -static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>> +static inline void __set_pmd_nosync(pmd_t *pmdp, pmd_t pmd)
>>  {
>>  #ifdef __PAGETABLE_PMD_FOLDED
>>  	if (in_swapper_pgdir(pmdp)) {
>> @@ -779,6 +784,11 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>>  #endif /* __PAGETABLE_PMD_FOLDED */
>>  
>>  	WRITE_ONCE(*pmdp, pmd);
>> +}
>> +
>> +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>> +{
>> +	__set_pmd_nosync(pmdp, pmd);
>>  
>>  	if (pmd_valid(pmd)) {
>>  		dsb(ishst);
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index e879bfcf853b..5fc2a4a804de 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -2209,7 +2209,7 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>>  	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>>  }
>>  
>> -static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>>  {
>>  	/*
>>  	 * We want to allow usage of bbml2 in as wide a range of kernel contexts
> 
> 
> [...] I'll send a separate response for the mmu.c table walker changes.
> 
>>  
>> +int split_linear_mapping(unsigned long start, unsigned long end)
>> +{
>> +	int ret = 0;
>> +
>> +	if (!system_supports_bbml2_noabort())
>> +		return 0;
> 
> Hmm... I guess the thinking here is that for !BBML2_NOABORT you are expecting
> this function should only be called in the first place if we know we are
> pte-mapped. So I guess this is ok... it just means that if we are not
> pte-mapped, warnings will be emitted while walking the pgtables (as is the case
> today). So I think this approach is ok.
> 
>> +
>> +	mmap_write_lock(&init_mm);
> 
> What is the lock protecting? I was orignally thinking no locking should be
> needed because it's not needed for permission changes today; But I think you are
> right here and we do need locking; multiple owners could share a large leaf
> mapping, I guess? And in that case you could get concurrent attempts to split
> from both owners.
> 
> I'm not really a fan of adding the extra locking though; It might introduce a
> new bottleneck. I wonder if there is a way we could do this locklessly? i.e.
> allocate the new table, then cmpxchg to insert and the loser has to free? That
> doesn't work for contiguous mappings though...
> 
>> +	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
>> +	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
>> +					  start, (end - start), __pgprot(0),
>> +					  __pgd_pgtable_alloc,
>> +					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
>> +	mmap_write_unlock(&init_mm);
>> +	flush_tlb_kernel_range(start, end);
> 
> I don't believe we should need to flush the TLB when only changing entry sizes
> when BBML2 is supported. Miko's series has a massive comment explaining the
> reasoning. That applies to user space though. We should consider if this all
> works safely for kernel space too, and hopefully remove the flush.
> 
>> +
>> +	return ret;
>> +}
>> +
>>  /*
>>   * This function can only be used to modify existing table entries,
>>   * without allocating new levels of table. Note that this permits the
>> @@ -676,6 +887,24 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) {
>>  
>>  #endif /* CONFIG_KFENCE */
>>  
>> +static inline bool force_pte_mapping(void)
>> +{
>> +	/*
>> +	 * Can't use cpufeature API to determine whether BBML2 supported
>> +	 * or not since cpufeature have not been finalized yet.
>> +	 *
>> +	 * Checking the boot CPU only for now.  If the boot CPU has
>> +	 * BBML2, paint linear mapping with block mapping.  If it turns
>> +	 * out the secondary CPUs don't support BBML2 once cpufeature is
>> +	 * fininalized, the linear mapping will be repainted with PTE
>> +	 * mapping.
>> +	 */
>> +	return (rodata_full && !bbml2_noabort_available()) ||
> 
> So this is the case where we don't have BBML2 and need to modify protections at
> page granularity - I agree we need to force pte mappings here.
> 
>> +		debug_pagealloc_enabled() ||
> 
> This is the case where every page is made invalid on free and valid on
> allocation, so no point in having block mappings because it will soon degenerate
> into page mappings because we will have to split on every allocation. Agree here
> too.
> 
>> +		arm64_kfence_can_set_direct_map() ||
> 
> After looking into how kfence works, I don't agree with this one. It has a
> dedicated pool where it allocates from. That pool may be allocated early by the
> arch or may be allocated late by the core code. Either way, kfence will only
> modify protections within that pool. You current approach is forcing pte
> mappings if the pool allocation is late (i.e. not performed by the arch code
> during boot). But I think "late" is the most common case; kfence is compiled
> into the kernel but not active at boot. Certainly that's how my Ubuntu kernel is
> configured. So I think we should just ignore kfence here. If it's "early" then
> we map the pool with page granularity (as an optimization). If it's "late" your
> splitter will degenerate the whole kfence pool to page mappings over time as
> kfence_protect_page() -> set_memory_valid() is called. But the bulk of the
> linear map will remain mapped with large blocks.
> 
>> +		is_realm_world();
> 
> I think the only reason this requires pte mappings is for
> __set_memory_enc_dec(). But that can now deal with block mappings given the
> ability to split the mappings as needed. So I think this condition can be
> removed too.

To clarify: the latter two would still be needed for the !BBML2_NOABORT case. So I
think the expression becomes:

	return (!bbml2_noabort_available() && (rodata_full ||
		arm64_kfence_can_set_direct_map() || is_realm_world())) ||
		debug_pagealloc_enabled();

Thanks,
Ryan

> 
>> +}
> 
> Additionally, for can_set_direct_map(); at minimum it's comment should be tidied
> up, but really I think it should return true if "BBML2_NOABORT ||
> force_pte_mapping()". Because they are the conditions under which we can now
> safely modify the linear map.
> 
>> +
>>  static void __init map_mem(pgd_t *pgdp)
>>  {
>>  	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>> @@ -701,7 +930,7 @@ static void __init map_mem(pgd_t *pgdp)
>>  
>>  	early_kfence_pool = arm64_kfence_alloc_pool();
>>  
>> -	if (can_set_direct_map())
>> +	if (force_pte_mapping())
>>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>  
>>  	/*
>> @@ -1402,7 +1631,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>>  
>>  	VM_BUG_ON(!mhp_range_allowed(start, size, true));
>>  
>> -	if (can_set_direct_map())
>> +	if (force_pte_mapping())
>>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>  
>>  	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index 39fd1f7ff02a..25c068712cb5 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -10,6 +10,7 @@
>>  #include <linux/vmalloc.h>
>>  
>>  #include <asm/cacheflush.h>
>> +#include <asm/mmu.h>
>>  #include <asm/pgtable-prot.h>
>>  #include <asm/set_memory.h>
>>  #include <asm/tlbflush.h>
>> @@ -42,6 +43,8 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>>  	struct page_change_data *cdata = data;
>>  	pte_t pte = __ptep_get(ptep);
>>  
>> +	BUG_ON(pte_cont(pte));
> 
> I don't think this is required; We want to enable using contiguous mappings
> where it makes sense. As long as we have BBML2, we can update contiguous pte
> mappings in place, as long as we update all of the ptes in the contiguous block.
> split_linear_map() should either have converted to non-cont mappings if the
> contiguous block straddled the split point, or would have left as is (or
> downgraded a PMD-block to a contpte block) if fully contained within the split
> range.
> 
>> +
>>  	pte = clear_pte_bit(pte, cdata->clear_mask);
>>  	pte = set_pte_bit(pte, cdata->set_mask);
>>  
>> @@ -80,8 +83,9 @@ static int change_memory_common(unsigned long addr, int numpages,
>>  	unsigned long start = addr;
>>  	unsigned long size = PAGE_SIZE * numpages;
>>  	unsigned long end = start + size;
>> +	unsigned long l_start;
>>  	struct vm_struct *area;
>> -	int i;
>> +	int i, ret;
>>  
>>  	if (!PAGE_ALIGNED(addr)) {
>>  		start &= PAGE_MASK;
>> @@ -118,7 +122,12 @@ static int change_memory_common(unsigned long addr, int numpages,
>>  	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
>>  			    pgprot_val(clear_mask) == PTE_RDONLY)) {
>>  		for (i = 0; i < area->nr_pages; i++) {
>> -			__change_memory_common((u64)page_address(area->pages[i]),
>> +			l_start = (u64)page_address(area->pages[i]);
>> +			ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>> +			if (WARN_ON_ONCE(ret))
>> +				return ret;
> 
> I don't think this is the right place to integrate; I think the split should be
> done inside __change_memory_common(). Then it caters to all possibilities (i.e.
> set_memory_valid() and __set_memory_enc_dec()). This means it will run for
> vmalloc too, but for now, that will be a nop because everything should already
> be split as required on entry and in future we will get that for free.
> 
> Once you have integrated Dev's series, the hook becomes
> ___change_memory_common() (3 underscores)...
> 
>> +
>> +			__change_memory_common(l_start,
>>  					       PAGE_SIZE, set_mask, clear_mask);
>>  		}
>>  	}
>> @@ -174,6 +183,9 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>>  
>>  int set_direct_map_invalid_noflush(struct page *page)
>>  {
>> +	unsigned long l_start;
>> +	int ret;
>> +
>>  	struct page_change_data data = {
>>  		.set_mask = __pgprot(0),
>>  		.clear_mask = __pgprot(PTE_VALID),
>> @@ -182,13 +194,21 @@ int set_direct_map_invalid_noflush(struct page *page)
>>  	if (!can_set_direct_map())
>>  		return 0;
>>  
>> +	l_start = (unsigned long)page_address(page);
>> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>> +	if (WARN_ON_ONCE(ret))
>> +		return ret;
>> +
>>  	return apply_to_page_range(&init_mm,
>> -				   (unsigned long)page_address(page),
>> -				   PAGE_SIZE, change_page_range, &data);
>> +				   l_start, PAGE_SIZE, change_page_range,
>> +				   &data);
> 
> ...and once integrated with Dev's series you don't need any changes here...
> 
>>  }
>>  
>>  int set_direct_map_default_noflush(struct page *page)
>>  {
>> +	unsigned long l_start;
>> +	int ret;
>> +
>>  	struct page_change_data data = {
>>  		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>>  		.clear_mask = __pgprot(PTE_RDONLY),
>> @@ -197,9 +217,14 @@ int set_direct_map_default_noflush(struct page *page)
>>  	if (!can_set_direct_map())
>>  		return 0;
>>  
>> +	l_start = (unsigned long)page_address(page);
>> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>> +	if (WARN_ON_ONCE(ret))
>> +		return ret;
>> +
>>  	return apply_to_page_range(&init_mm,
>> -				   (unsigned long)page_address(page),
>> -				   PAGE_SIZE, change_page_range, &data);
>> +				   l_start, PAGE_SIZE, change_page_range,
>> +				   &data);
> 
> ...or here.
> 
> Thanks,
> Ryan
> 
>>  }
>>  
>>  static int __set_memory_enc_dec(unsigned long addr,
>
Re: [PATCH 3/4] arm64: mm: support large block mapping when rodata=full
Posted by Yang Shi 6 months ago

On 6/16/25 5:33 AM, Ryan Roberts wrote:
> On 16/06/2025 12:58, Ryan Roberts wrote:
>> On 31/05/2025 03:41, Yang Shi wrote:
>>> When rodata=full is specified, kernel linear mapping has to be mapped at
>>> PTE level since large page table can't be split due to break-before-make
>>> rule on ARM64.
>>>
>>> This resulted in a couple of problems:
>>>    - performance degradation
>>>    - more TLB pressure
>>>    - memory waste for kernel page table
>>>
>>> With FEAT_BBM level 2 support, splitting large block page table to
>>> smaller ones doesn't need to make the page table entry invalid anymore.
>>> This allows kernel split large block mapping on the fly.
>>>
>>> Add kernel page table split support and use large block mapping by
>>> default when FEAT_BBM level 2 is supported for rodata=full.  When
>>> changing permissions for kernel linear mapping, the page table will be
>>> split to smaller size.
>>>
>>> The machine without FEAT_BBM level 2 will fallback to have kernel linear
>>> mapping PTE-mapped when rodata=full.
>>>
>>> With this we saw significant performance boost with some benchmarks and
>>> much less memory consumption on my AmpereOne machine (192 cores, 1P) with
>>> 256GB memory.
>>>
>>> * Memory use after boot
>>> Before:
>>> MemTotal:       258988984 kB
>>> MemFree:        254821700 kB
>>>
>>> After:
>>> MemTotal:       259505132 kB
>>> MemFree:        255410264 kB
>>>
>>> Around 500MB more memory are free to use.  The larger the machine, the
>>> more memory saved.
>>>
>>> * Memcached
>>> We saw performance degradation when running Memcached benchmark with
>>> rodata=full vs rodata=on.  Our profiling pointed to kernel TLB pressure.
>>> With this patchset we saw ops/sec is increased by around 3.5%, P99
>>> latency is reduced by around 9.6%.
>>> The gain mainly came from reduced kernel TLB misses.  The kernel TLB
>>> MPKI is reduced by 28.5%.
>>>
>>> The benchmark data is now on par with rodata=on too.
>>>
>>> * Disk encryption (dm-crypt) benchmark
>>> Ran fio benchmark with the below command on a 128G ramdisk (ext4) with disk
>>> encryption (by dm-crypt).
>>> fio --directory=/data --random_generator=lfsr --norandommap --randrepeat 1 \
>>>      --status-interval=999 --rw=write --bs=4k --loops=1 --ioengine=sync \
>>>      --iodepth=1 --numjobs=1 --fsync_on_close=1 --group_reporting --thread \
>>>      --name=iops-test-job --eta-newline=1 --size 100G
>>>
>>> The IOPS is increased by 90% - 150% (the variance is high, but the worst
>>> number of good case is around 90% more than the best number of bad case).
>>> The bandwidth is increased and the avg clat is reduced proportionally.
>>>
>>> * Sequential file read
>>> Read 100G file sequentially on XFS (xfs_io read with page cache populated).
>>> The bandwidth is increased by 150%.
>>>
>>> Signed-off-by: Yang Shi<yang@os.amperecomputing.com>
>>> ---
>>>   arch/arm64/include/asm/cpufeature.h |  26 +++
>>>   arch/arm64/include/asm/mmu.h        |   1 +
>>>   arch/arm64/include/asm/pgtable.h    |  12 +-
>>>   arch/arm64/kernel/cpufeature.c      |   2 +-
>>>   arch/arm64/mm/mmu.c                 | 269 +++++++++++++++++++++++++---
>>>   arch/arm64/mm/pageattr.c            |  37 +++-
>>>   6 files changed, 319 insertions(+), 28 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
>>> index 8f36ffa16b73..a95806980298 100644
>>> --- a/arch/arm64/include/asm/cpufeature.h
>>> +++ b/arch/arm64/include/asm/cpufeature.h
>>> @@ -1053,6 +1053,32 @@ static inline bool cpu_has_lpa2(void)
>>>   #endif
>>>   }
>>>   
>>> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr);
>>> +
>>> +static inline bool has_nobbml2_override(void)
>>> +{
>>> +	u64 mmfr2;
>>> +	unsigned int bbm;
>>> +
>>> +	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
>>> +	mmfr2 &= ~id_aa64mmfr2_override.mask;
>>> +	mmfr2 |= id_aa64mmfr2_override.val;
>>> +	bbm = cpuid_feature_extract_unsigned_field(mmfr2,
>>> +						   ID_AA64MMFR2_EL1_BBM_SHIFT);
>>> +	return bbm == 0;
>>> +}
>>> +
>>> +/*
>>> + * Called at early boot stage on boot CPU before cpu info and cpu feature
>>> + * are ready.
>>> + */
>>> +static inline bool bbml2_noabort_available(void)
>>> +{
>>> +	return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
>>> +	       cpu_has_bbml2_noabort(read_cpuid_id()) &&
>>> +	       !has_nobbml2_override();
>> Based on Will's feedback, The Kconfig and the cmdline override will both
>> disappear in Miko's next version and we will only use the MIDR list to decide
>> BBML2_NOABORT status, so this will significantly simplify. Sorry about the churn
>> here.

Good news! I just saw Miko's v7 series; I will rebase onto it.

>>> +}
>>> +
>>>   #endif /* __ASSEMBLY__ */
>>>   
>>>   #endif
>>> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
>>> index 6e8aa8e72601..2693d63bf837 100644
>>> --- a/arch/arm64/include/asm/mmu.h
>>> +++ b/arch/arm64/include/asm/mmu.h
>>> @@ -71,6 +71,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>>>   			       pgprot_t prot, bool page_mappings_only);
>>>   extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>>>   extern void mark_linear_text_alias_ro(void);
>>> +extern int split_linear_mapping(unsigned long start, unsigned long end);
>> nit: Perhaps split_leaf_mapping() or split_kernel_pgtable_mapping() or something
>> similar is more generic which will benefit us in future when using this for
>> vmalloc too?

Yeah, sure. Will use split_kernel_pgtable_mapping().

>>>   
>>>   /*
>>>    * This check is triggered during the early boot before the cpufeature
>>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>>> index d3b538be1500..bf3cef31d243 100644
>>> --- a/arch/arm64/include/asm/pgtable.h
>>> +++ b/arch/arm64/include/asm/pgtable.h
>>> @@ -293,6 +293,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
>>>   	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
>>>   }
>>>   
>>> +static inline pmd_t pmd_mknoncont(pmd_t pmd)
>>> +{
>>> +	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
>>> +}
>>> +
>>>   static inline pte_t pte_mkdevmap(pte_t pte)
>>>   {
>>>   	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
>>> @@ -769,7 +774,7 @@ static inline bool in_swapper_pgdir(void *addr)
>>>   	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
>>>   }
>>>   
>>> -static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>>> +static inline void __set_pmd_nosync(pmd_t *pmdp, pmd_t pmd)
>>>   {
>>>   #ifdef __PAGETABLE_PMD_FOLDED
>>>   	if (in_swapper_pgdir(pmdp)) {
>>> @@ -779,6 +784,11 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>>>   #endif /* __PAGETABLE_PMD_FOLDED */
>>>   
>>>   	WRITE_ONCE(*pmdp, pmd);
>>> +}
>>> +
>>> +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>>> +{
>>> +	__set_pmd_nosync(pmdp, pmd);
>>>   
>>>   	if (pmd_valid(pmd)) {
>>>   		dsb(ishst);
>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>>> index e879bfcf853b..5fc2a4a804de 100644
>>> --- a/arch/arm64/kernel/cpufeature.c
>>> +++ b/arch/arm64/kernel/cpufeature.c
>>> @@ -2209,7 +2209,7 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>>>   	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>>>   }
>>>   
>>> -static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>>> +bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>>>   {
>>>   	/*
>>>   	 * We want to allow usage of bbml2 in as wide a range of kernel contexts
>> [...] I'll send a separate response for the mmu.c table walker changes.
>>
>>>   
>>> +int split_linear_mapping(unsigned long start, unsigned long end)
>>> +{
>>> +	int ret = 0;
>>> +
>>> +	if (!system_supports_bbml2_noabort())
>>> +		return 0;
>> Hmm... I guess the thinking here is that for !BBML2_NOABORT you are expecting
>> this function should only be called in the first place if we know we are
>> pte-mapped. So I guess this is ok... it just means that if we are not
>> pte-mapped, warnings will be emitted while walking the pgtables (as is the case
>> today). So I think this approach is ok.

Yes, we can't split kernel page tables on the fly w/o BBML2_NOABORT; in that
case the kernel linear mapping should always be PTE-mapped, so it returns 0
instead of an errno.

>>> +
>>> +	mmap_write_lock(&init_mm);
>> What is the lock protecting? I was orignally thinking no locking should be
>> needed because it's not needed for permission changes today; But I think you are
>> right here and we do need locking; multiple owners could share a large leaf
>> mapping, I guess? And in that case you could get concurrent attempts to split
>> from both owners.

Yes, for example, [addr, addr + 4K) and [addr + 4K, addr + 8K) may belong to
two different owners. There may be a race when both owners want to split the
page table.

>> I'm not really a fan of adding the extra locking though; It might introduce a
>> new bottleneck. I wonder if there is a way we could do this locklessly? i.e.
>> allocate the new table, then cmpxchg to insert and the loser has to free? That
>> doesn't work for contiguous mappings though...

I'm not sure whether it is going to be a real bottleneck or not. I saw that
x86 uses a dedicated lock, called cpa_lock, so it seems lock contention is not
a real problem there.
I used init_mm's mmap_lock instead of inventing a new lock. My thought is we
can start with something simple and optimize it if it turns out to be a
problem.
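
If the mmap_lock does turn out to be a bottleneck, one simple alternative would
be a dedicated lock just for the split path, along the lines of the below
sketch (pgtable_split_lock is a made-up name; the rest is the same as this
patch):

	static DEFINE_MUTEX(pgtable_split_lock);

	int split_linear_mapping(unsigned long start, unsigned long end)
	{
		int ret;

		if (!system_supports_bbml2_noabort())
			return 0;

		mutex_lock(&pgtable_split_lock);
		/* NO_EXEC_MAPPINGS is needed when splitting linear map */
		ret = __create_pgd_mapping_locked(init_mm.pgd,
						  virt_to_phys((void *)start),
						  start, (end - start), __pgprot(0),
						  __pgd_pgtable_alloc,
						  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
		mutex_unlock(&pgtable_split_lock);
		flush_tlb_kernel_range(start, end);

		return ret;
	}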

>>> +	/* NO_EXEC_MAPPINGS is needed when splitting linear map */
>>> +	ret = __create_pgd_mapping_locked(init_mm.pgd, virt_to_phys((void *)start),
>>> +					  start, (end - start), __pgprot(0),
>>> +					  __pgd_pgtable_alloc,
>>> +					  NO_EXEC_MAPPINGS | SPLIT_MAPPINGS);
>>> +	mmap_write_unlock(&init_mm);
>>> +	flush_tlb_kernel_range(start, end);
>> I don't believe we should need to flush the TLB when only changing entry sizes
>> when BBML2 is supported. Miko's series has a massive comment explaining the
>> reasoning. That applies to user space though. We should consider if this all
>> works safely for kernel space too, and hopefully remove the flush.

I think it should be the same as for userspace. The point is that the hardware
will handle TLB conflicts gracefully with BBML2_NOABORT, and it does the same
thing for userspace and kernel addresses, so the TLB flush should not be
necessary. TLB pressure or the implicit invalidation on a TLB conflict will do
the job.

>>> +
>>> +	return ret;
>>> +}
>>> +
>>>   /*
>>>    * This function can only be used to modify existing table entries,
>>>    * without allocating new levels of table. Note that this permits the
>>> @@ -676,6 +887,24 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) {
>>>   
>>>   #endif /* CONFIG_KFENCE */
>>>   
>>> +static inline bool force_pte_mapping(void)
>>> +{
>>> +	/*
>>> +	 * Can't use cpufeature API to determine whether BBML2 supported
>>> +	 * or not since cpufeature have not been finalized yet.
>>> +	 *
>>> +	 * Checking the boot CPU only for now.  If the boot CPU has
>>> +	 * BBML2, paint linear mapping with block mapping.  If it turns
>>> +	 * out the secondary CPUs don't support BBML2 once cpufeature is
>>> +	 * fininalized, the linear mapping will be repainted with PTE
>>> +	 * mapping.
>>> +	 */
>>> +	return (rodata_full && !bbml2_noabort_available()) ||
>> So this is the case where we don't have BBML2 and need to modify protections at
>> page granularity - I agree we need to force pte mappings here.
>>
>>> +		debug_pagealloc_enabled() ||
>> This is the case where every page is made invalid on free and valid on
>> allocation, so no point in having block mappings because it will soon degenerate
>> into page mappings because we will have to split on every allocation. Agree here
>> too.
>>
>>> +		arm64_kfence_can_set_direct_map() ||
>> After looking into how kfence works, I don't agree with this one. It has a
>> dedicated pool where it allocates from. That pool may be allocated early by the
>> arch or may be allocated late by the core code. Either way, kfence will only
>> modify protections within that pool. You current approach is forcing pte
>> mappings if the pool allocation is late (i.e. not performed by the arch code
>> during boot). But I think "late" is the most common case; kfence is compiled
>> into the kernel but not active at boot. Certainly that's how my Ubuntu kernel is
>> configured. So I think we should just ignore kfence here. If it's "early" then
>> we map the pool with page granularity (as an optimization). If it's "late" your
>> splitter will degenerate the whole kfence pool to page mappings over time as
>> kfence_protect_page() -> set_memory_valid() is called. But the bulk of the
>> linear map will remain mapped with large blocks.

OK, thanks for looking into this. I misunderstood how the late pool works.

>>> +		is_realm_world();
>> I think the only reason this requires pte mappings is for
>> __set_memory_enc_dec(). But that can now deal with block mappings given the
>> ability to split the mappings as needed. So I think this condition can be
>> removed too.

Sure

> To clarify; the latter 2 would still be needed for the !BBML2_NOABORT case. So I
> think the expression becomes:
>
> 	return (!bbml2_noabort_available() && (rodata_full ||
> 		arm64_kfence_can_set_direct_map() || is_realm_world())) ||
> 		debug_pagealloc_enabled();

Thanks for coming up with this expression.

Thanks,
Yang

> Thanks,
> Ryan
>
>>> +}
>> Additionally, for can_set_direct_map(); at minimum it's comment should be tidied
>> up, but really I think it should return true if "BBML2_NOABORT ||
>> force_pte_mapping()". Because they are the conditions under which we can now
>> safely modify the linear map.
>>
>>> +
>>>   static void __init map_mem(pgd_t *pgdp)
>>>   {
>>>   	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>>> @@ -701,7 +930,7 @@ static void __init map_mem(pgd_t *pgdp)
>>>   
>>>   	early_kfence_pool = arm64_kfence_alloc_pool();
>>>   
>>> -	if (can_set_direct_map())
>>> +	if (force_pte_mapping())
>>>   		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>>   
>>>   	/*
>>> @@ -1402,7 +1631,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>>>   
>>>   	VM_BUG_ON(!mhp_range_allowed(start, size, true));
>>>   
>>> -	if (can_set_direct_map())
>>> +	if (force_pte_mapping())
>>>   		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>>   
>>>   	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
>>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>>> index 39fd1f7ff02a..25c068712cb5 100644
>>> --- a/arch/arm64/mm/pageattr.c
>>> +++ b/arch/arm64/mm/pageattr.c
>>> @@ -10,6 +10,7 @@
>>>   #include <linux/vmalloc.h>
>>>   
>>>   #include <asm/cacheflush.h>
>>> +#include <asm/mmu.h>
>>>   #include <asm/pgtable-prot.h>
>>>   #include <asm/set_memory.h>
>>>   #include <asm/tlbflush.h>
>>> @@ -42,6 +43,8 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>>>   	struct page_change_data *cdata = data;
>>>   	pte_t pte = __ptep_get(ptep);
>>>   
>>> +	BUG_ON(pte_cont(pte));
>> I don't think this is required; We want to enable using contiguous mappings
>> where it makes sense. As long as we have BBML2, we can update contiguous pte
>> mappings in place, as long as we update all of the ptes in the contiguous block.
>> split_linear_map() should either have converted to non-cont mappings if the
>> contiguous block straddled the split point, or would have left as is (or
>> downgraded a PMD-block to a contpte block) if fully contained within the split
>> range.
>>
>>> +
>>>   	pte = clear_pte_bit(pte, cdata->clear_mask);
>>>   	pte = set_pte_bit(pte, cdata->set_mask);
>>>   
>>> @@ -80,8 +83,9 @@ static int change_memory_common(unsigned long addr, int numpages,
>>>   	unsigned long start = addr;
>>>   	unsigned long size = PAGE_SIZE * numpages;
>>>   	unsigned long end = start + size;
>>> +	unsigned long l_start;
>>>   	struct vm_struct *area;
>>> -	int i;
>>> +	int i, ret;
>>>   
>>>   	if (!PAGE_ALIGNED(addr)) {
>>>   		start &= PAGE_MASK;
>>> @@ -118,7 +122,12 @@ static int change_memory_common(unsigned long addr, int numpages,
>>>   	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
>>>   			    pgprot_val(clear_mask) == PTE_RDONLY)) {
>>>   		for (i = 0; i < area->nr_pages; i++) {
>>> -			__change_memory_common((u64)page_address(area->pages[i]),
>>> +			l_start = (u64)page_address(area->pages[i]);
>>> +			ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>>> +			if (WARN_ON_ONCE(ret))
>>> +				return ret;
>> I don't think this is the right place to integrate; I think the split should be
>> done inside __change_memory_common(). Then it caters to all possibilities (i.e.
>> set_memory_valid() and __set_memory_enc_dec()). This means it will run for
>> vmalloc too, but for now, that will be a nop because everything should already
>> be split as required on entry and in future we will get that for free.
>>
>> Once you have integrated Dev's series, the hook becomes
>> ___change_memory_common() (3 underscores)...
>>
>>> +
>>> +			__change_memory_common(l_start,
>>>   					       PAGE_SIZE, set_mask, clear_mask);
>>>   		}
>>>   	}
>>> @@ -174,6 +183,9 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>>>   
>>>   int set_direct_map_invalid_noflush(struct page *page)
>>>   {
>>> +	unsigned long l_start;
>>> +	int ret;
>>> +
>>>   	struct page_change_data data = {
>>>   		.set_mask = __pgprot(0),
>>>   		.clear_mask = __pgprot(PTE_VALID),
>>> @@ -182,13 +194,21 @@ int set_direct_map_invalid_noflush(struct page *page)
>>>   	if (!can_set_direct_map())
>>>   		return 0;
>>>   
>>> +	l_start = (unsigned long)page_address(page);
>>> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>>> +	if (WARN_ON_ONCE(ret))
>>> +		return ret;
>>> +
>>>   	return apply_to_page_range(&init_mm,
>>> -				   (unsigned long)page_address(page),
>>> -				   PAGE_SIZE, change_page_range, &data);
>>> +				   l_start, PAGE_SIZE, change_page_range,
>>> +				   &data);
>> ...and once integrated with Dev's series you don't need any changes here...
>>
>>>   }
>>>   
>>>   int set_direct_map_default_noflush(struct page *page)
>>>   {
>>> +	unsigned long l_start;
>>> +	int ret;
>>> +
>>>   	struct page_change_data data = {
>>>   		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>>>   		.clear_mask = __pgprot(PTE_RDONLY),
>>> @@ -197,9 +217,14 @@ int set_direct_map_default_noflush(struct page *page)
>>>   	if (!can_set_direct_map())
>>>   		return 0;
>>>   
>>> +	l_start = (unsigned long)page_address(page);
>>> +	ret = split_linear_mapping(l_start, l_start + PAGE_SIZE);
>>> +	if (WARN_ON_ONCE(ret))
>>> +		return ret;
>>> +
>>>   	return apply_to_page_range(&init_mm,
>>> -				   (unsigned long)page_address(page),
>>> -				   PAGE_SIZE, change_page_range, &data);
>>> +				   l_start, PAGE_SIZE, change_page_range,
>>> +				   &data);
>> ...or here.
>>
>> Thanks,
>> Ryan
>>
>>>   }
>>>   
>>>   static int __set_memory_enc_dec(unsigned long addr,