From: Anshuman Khandual
To: linux-arm-kernel@lists.infradead.org
Cc: Anshuman Khandual, Catalin Marinas, Will Deacon, Mark Rutland,
	Andrey Ryabinin, Alexander Potapenko, Andrey Konovalov,
	Dmitry Vyukov, Ard Biesheuvel, Ryan Roberts,
	linux-kernel@vger.kernel.org, kasan-dev@googlegroups.com
Subject: [PATCH V2] arm64/mm: Define PTDESC_ORDER
Date: Mon, 10 Mar 2025 09:31:15 +0530
Message-Id: <20250310040115.91298-1-anshuman.khandual@arm.com>
X-Mailer: git-send-email 2.25.1

The number of address bytes shifted by a single 64 bit page table entry
(at any page table level) has always been hard coded as 3 (i.e. 2^3 = 8
bytes per entry). Although intuitive, this is neither readable nor easy
to reason about. Besides, it is going to change with D128, where each
128 bit page table entry will shift address bytes by 4 (i.e. 2^4 = 16
bytes) instead. Formalise this shift value into a new macro called
PTDESC_ORDER, establishing a logical abstraction and improving
readability in the process. While here, also re-organize the EARLY_LEVEL
macro along with its dependents for better clarity. This does not cause
any functional change.
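The arithmetic that the macro captures can be illustrated with a small
standalone userspace sketch (illustration only, not part of the patch;
the PAGE_SHIFT value assumes 4K pages):

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assumed: 4K pages */
	#define PTDESC_ORDER	3	/* 64 bit descriptor: 2^3 = 8 bytes */

	int main(void)
	{
		/* VA bits resolved by one translation table level */
		int table_shift = PAGE_SHIFT - PTDESC_ORDER;

		/* 2^9 = 512 entries per table, i.e. PTRS_PER_PTE */
		printf("bits/level = %d, entries/table = %d\n",
		       table_shift, 1 << table_shift);

		/* With D128 (PTDESC_ORDER == 4) this becomes 8 bits, 256 entries */
		return 0;
	}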
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Andrey Konovalov
Cc: Dmitry Vyukov
Cc: Ard Biesheuvel
Cc: Ryan Roberts
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: kasan-dev@googlegroups.com
Acked-by: Ard Biesheuvel
Reviewed-by: Ryan Roberts
Signed-off-by: Anshuman Khandual
---
This patch applies on v6.14-rc6

Changes in V2:

- Replaced PTE_SHIFT with PTDESC_ORDER per Ard
- Re-organized EARLY_LEVEL macro per Mark

Changes in V1:

https://lore.kernel.org/all/20250307050851.4034393-1-anshuman.khandual@arm.com/

 arch/arm64/Kconfig                      |  2 +-
 arch/arm64/include/asm/kernel-pgtable.h | 11 +++++++----
 arch/arm64/include/asm/pgtable-hwdef.h  | 27 +++++++++++++------------
 arch/arm64/kernel/pi/map_range.c        |  2 +-
 arch/arm64/mm/kasan_init.c              |  6 +++---
 5 files changed, 27 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..657baf59fdbe 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -323,7 +323,7 @@ config ARCH_MMAP_RND_BITS_MIN
 	default 18
 
 # max bits determined by the following formula:
-#  VA_BITS - PAGE_SHIFT - 3
+#  VA_BITS - PAGE_SHIFT - PTDESC_ORDER
 config ARCH_MMAP_RND_BITS_MAX
 	default 19 if ARM64_VA_BITS=36
 	default 24 if ARM64_VA_BITS=39
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fd5a08450b12..78c7e03a0e35 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -45,11 +45,14 @@
 #define SPAN_NR_ENTRIES(vstart, vend, shift) \
 	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
 
-#define EARLY_ENTRIES(vstart, vend, shift, add) \
-	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
+/* Number of VA bits resolved by a single translation table level */
+#define PTDESC_TABLE_SHIFT	(PAGE_SHIFT - PTDESC_ORDER)
 
-#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
-	(lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
+#define EARLY_ENTRIES(lvl, vstart, vend) \
+	SPAN_NR_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * PTDESC_TABLE_SHIFT)
+
+#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
+	((lvls) > (lvl) ? EARLY_ENTRIES(lvl, vstart, vend) + (add) : 0)
 
 #define EARLY_PAGES(lvls, vstart, vend, add) (1 	/* PGDIR page */ \
 	+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index a9136cc551cc..3c544edc3968 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -7,40 +7,43 @@
 
 #include <asm/memory.h>
 
+#define PTDESC_ORDER	3
+
 /*
  * Number of page-table levels required to address 'va_bits' wide
  * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
- * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
+ * bits with (PAGE_SHIFT - PTDESC_ORDER) bits at each page table level. Hence:
  *
- * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
+ * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - PTDESC_ORDER))
  *
  * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
  *
  * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
  * due to build issues.
  * So we open code DIV_ROUND_UP here:
  *
- * ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
+ * ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - PTDESC_ORDER) - 1) / (PAGE_SHIFT - PTDESC_ORDER))
  *
  * which gets simplified as :
  */
-#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
+#define ARM64_HW_PGTABLE_LEVELS(va_bits) \
+	(((va_bits) - PTDESC_ORDER - 1) / (PAGE_SHIFT - PTDESC_ORDER))
 
 /*
  * Size mapped by an entry at level n ( -1 <= n <= 3)
- * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
+ * We map (PAGE_SHIFT - PTDESC_ORDER) at all translation levels and PAGE_SHIFT bits
  * in the final page. The maximum number of translation levels supported by
  * the architecture is 5. Hence, starting at level n, we have further
  * ((4 - n) - 1) levels of translation excluding the offset within the page.
  * So, the total number of bits mapped by an entry at level n is :
  *
- * ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
+ * ((4 - n) - 1) * (PAGE_SHIFT - PTDESC_ORDER) + PAGE_SHIFT
  *
  * Rearranging it a bit we get :
- *   (4 - n) * (PAGE_SHIFT - 3) + 3
+ *   (4 - n) * (PAGE_SHIFT - PTDESC_ORDER) + PTDESC_ORDER
  */
-#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)
+#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - PTDESC_ORDER) * (4 - (n)) + PTDESC_ORDER)
 
-#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - PTDESC_ORDER))
 
 /*
  * PMD_SHIFT determines the size a level 2 page table entry can map.
@@ -49,7 +52,7 @@
 #define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
 #define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK		(~(PMD_SIZE-1))
-#define PTRS_PER_PMD		(1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PMD		(1 << (PAGE_SHIFT - PTDESC_ORDER))
 #endif
 
 /*
@@ -59,14 +62,14 @@
 #define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
 #define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
 #define PUD_MASK		(~(PUD_SIZE-1))
-#define PTRS_PER_PUD		(1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PUD		(1 << (PAGE_SHIFT - PTDESC_ORDER))
 #endif
 
 #if CONFIG_PGTABLE_LEVELS > 4
 #define P4D_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
 #define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
 #define P4D_MASK		(~(P4D_SIZE-1))
-#define PTRS_PER_P4D		(1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_P4D		(1 << (PAGE_SHIFT - PTDESC_ORDER))
 #endif
 
 /*
diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
index 2b69e3beeef8..f74335e13929 100644
--- a/arch/arm64/kernel/pi/map_range.c
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -31,7 +31,7 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
 {
 	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
 	pteval_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
-	int lshift = (3 - level) * (PAGE_SHIFT - 3);
+	int lshift = (3 - level) * (PAGE_SHIFT - PTDESC_ORDER);
 	u64 lmask = (PAGE_SIZE << lshift) - 1;
 
 	start &= PAGE_MASK;
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index b65a29440a0c..211821f80571 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -190,7 +190,7 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
  */
 static bool __init root_level_aligned(u64 addr)
 {
-	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);
+	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - PTDESC_ORDER);
 
 	return (addr % (PAGE_SIZE << shift)) == 0;
 }
@@ -245,7 +245,7 @@ static int __init root_level_idx(u64 addr)
 	 */
 	u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
 							: vabits_actual;
-	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);
+	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - PTDESC_ORDER);
 
 	return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
 }
@@ -269,7 +269,7 @@ static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
  */
 static int __init next_level_idx(u64 addr)
 {
-	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);
+	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - PTDESC_ORDER);
 
 	return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
 }
-- 
2.25.1
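
P.S.: The refactored level expressions can be sanity checked outside the
kernel. Below is a minimal userspace sketch (not part of the patch),
assuming 4K pages, with the macro bodies mirrored from the patched
pgtable-hwdef.h:

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assumed: 4K pages */
	#define PTDESC_ORDER	3

	/* Expressions mirrored from the patched pgtable-hwdef.h */
	#define ARM64_HW_PGTABLE_LEVELS(va_bits) \
		(((va_bits) - PTDESC_ORDER - 1) / (PAGE_SHIFT - PTDESC_ORDER))
	#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) \
		((PAGE_SHIFT - PTDESC_ORDER) * (4 - (n)) + PTDESC_ORDER)

	int main(void)
	{
		/* 48 bit VA with 4K pages needs 4 translation levels */
		printf("levels(48) = %d\n", ARM64_HW_PGTABLE_LEVELS(48));

		/* a level 2 (PMD) entry maps 1 << 21 bytes, i.e. 2M */
		printf("shift(2)   = %d\n", ARM64_HW_PGTABLE_LEVEL_SHIFT(2));
		return 0;
	}

Compiled with any C compiler, this prints levels(48) = 4 and
shift(2) = 21, matching the expected four translation levels and 2M PMD
mappings for a 48 bit VA / 4K page configuration.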