From: Ard Biesheuvel <ardb@kernel.org>
Currently, the maximum supported physical address space can be
configured as either 48 bits or 52 bits. The only remaining difference
between these in practice is that the former omits the masking and
shifting required to construct TTBR and PTE values, which carry bits #48
and higher disjoint from the rest of the physical address.
The overhead of performing these additional calculations is negligible,
and so there is little reason to retain support for two different
configurations, and we can simply support whatever the hardware
supports.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/arm64/Kconfig | 31 +-------------------
arch/arm64/include/asm/assembler.h | 13 ++------
arch/arm64/include/asm/cpufeature.h | 3 +-
arch/arm64/include/asm/kvm_pgtable.h | 3 +-
arch/arm64/include/asm/pgtable-hwdef.h | 6 +---
arch/arm64/include/asm/pgtable-prot.h | 4 +--
arch/arm64/include/asm/pgtable.h | 11 +------
arch/arm64/include/asm/sysreg.h | 6 ----
arch/arm64/mm/pgd.c | 9 +++---
arch/arm64/mm/proc.S | 2 --
scripts/gdb/linux/constants.py.in | 1 -
tools/arch/arm64/include/asm/sysreg.h | 6 ----
12 files changed, 14 insertions(+), 81 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c1ca21adddc1..7ebd0ba32a32 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1416,38 +1416,9 @@ config ARM64_VA_BITS
default 48 if ARM64_VA_BITS_48
default 52 if ARM64_VA_BITS_52
-choice
- prompt "Physical address space size"
- default ARM64_PA_BITS_48
- help
- Choose the maximum physical address range that the kernel will
- support.
-
-config ARM64_PA_BITS_48
- bool "48-bit"
- depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
-
-config ARM64_PA_BITS_52
- bool "52-bit"
- depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
- help
- Enable support for a 52-bit physical address space, introduced as
- part of the ARMv8.2-LPA extension.
-
- With this enabled, the kernel will also continue to work on CPUs that
- do not support ARMv8.2-LPA, but with some added memory overhead (and
- minor performance overhead).
-
-endchoice
-
-config ARM64_PA_BITS
- int
- default 48 if ARM64_PA_BITS_48
- default 52 if ARM64_PA_BITS_52
-
config ARM64_LPA2
def_bool y
- depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+ depends on !ARM64_64K_PAGES
choice
prompt "Endianness"
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ad63457a05c5..01a1e3c16283 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -342,14 +342,13 @@ alternative_cb_end
mrs \tmp0, ID_AA64MMFR0_EL1
// Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
- mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
#ifdef CONFIG_ARM64_LPA2
alternative_if_not ARM64_HAS_VA52
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
-alternative_else_nop_endif
-#endif
cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi
+alternative_else_nop_endif
+#endif
bfi \tcr, \tmp0, \pos, #3
.endm
@@ -599,21 +598,13 @@ alternative_endif
* ttbr: returns the TTBR value
*/
.macro phys_to_ttbr, ttbr, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
orr \ttbr, \phys, \phys, lsr #46
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
-#else
- mov \ttbr, \phys
-#endif
.endm
.macro phys_to_pte, pte, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
-#else
- mov \pte, \phys
-#endif
.endm
/*
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b64e49bd9d10..ed327358e734 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -885,9 +885,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
* However, by the "D10.1.4 Principles of the ID scheme
* for fields in ID registers", ARM DDI 0487C.a, any new
* value is guaranteed to be higher than what we know already.
- * As a safe limit, we return the limit supported by the kernel.
*/
- default: return CONFIG_ARM64_PA_BITS;
+ default: return 52;
}
}
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index aab04097b505..525aef178cb4 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -30,8 +30,7 @@
static inline u64 kvm_get_parange_max(void)
{
- if (kvm_lpa2_is_enabled() ||
- (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+ if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
return ID_AA64MMFR0_EL1_PARANGE_52;
else
return ID_AA64MMFR0_EL1_PARANGE_48;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index a9136cc551cc..9b34180042b2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -176,7 +176,6 @@
#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
-#ifdef CONFIG_ARM64_PA_BITS_52
#ifdef CONFIG_ARM64_64K_PAGES
#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_HIGH_SHIFT 36
@@ -186,7 +185,6 @@
#define PTE_ADDR_HIGH_SHIFT 42
#define PHYS_TO_PTE_ADDR_MASK GENMASK_ULL(49, 8)
#endif
-#endif
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -327,12 +325,10 @@
/*
* TTBR.
*/
-#ifdef CONFIG_ARM64_PA_BITS_52
/*
- * TTBR_ELx[1] is RES0 in this configuration.
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
*/
#define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2)
-#endif
#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index a95f1f77bb39..b73acf25341f 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared;
#define lpa2_is_enabled() false
#define PTE_MAYBE_SHARED PTE_SHARED
#define PMD_MAYBE_SHARED PMD_SECT_S
-#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
+#define PHYS_MASK_SHIFT (52)
#else
static inline bool __pure lpa2_is_enabled(void)
{
@@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void)
#define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
#define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
-#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? 52 : 48)
#endif
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6986345b537a..ec8124d66b9c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
/*
- * Macros to convert between a physical address and its placement in a
+ * Helpers to convert between a physical address and its placement in a
* page table entry, taking care of 52-bit addresses.
*/
-#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
pte_val(pte) &= ~PTE_MAYBE_SHARED;
@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
-#else
-#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys) (phys)
-#endif
#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) \
@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
-#else
-#define phys_to_ttbr(addr) (addr)
-#endif
/*
* On arm64 without hardware Access Flag, copying from user will fail because
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b8303a83c0bf..f902893ec903 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -916,12 +916,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 0c501cabc238..8722ab6d4b1c 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
void __init pgtable_cache_init(void)
{
+ unsigned int pgd_size = PGD_SIZE;
+
if (pgdir_is_page_size())
return;
-#ifdef CONFIG_ARM64_PA_BITS_52
/*
* With 52-bit physical addresses, the architecture requires the
* top-level table to be aligned to at least 64 bytes.
*/
- BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+ if (PHYS_MASK_SHIFT >= 52)
+ pgd_size = max(pgd_size, 64);
/*
* Naturally aligned pgds required by the architecture.
*/
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
SLAB_PANIC, NULL);
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b8edc5765441..51ed0e9d0a0d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
.macro pte_to_phys, phys, pte
and \phys, \pte, #PTE_ADDR_LOW
-#ifdef CONFIG_ARM64_PA_BITS_52
and \pte, \pte, #PTE_ADDR_HIGH
orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
-#endif
.endm
.macro kpti_mk_tbl_ng, type, num_entries
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index fd6bd69c5096..05034c0b8fd7 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
LX_CONFIG(CONFIG_ARM64_16K_PAGES)
LX_CONFIG(CONFIG_ARM64_64K_PAGES)
if IS_BUILTIN(CONFIG_ARM64):
- LX_VALUE(CONFIG_ARM64_PA_BITS)
LX_VALUE(CONFIG_ARM64_VA_BITS)
LX_VALUE(CONFIG_PAGE_SHIFT)
LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index cd8420e8c3ad..daeecb1a5366 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -574,12 +574,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
--
2.47.1.613.gc27f4b7a9f-goog
Hi Ard, On 12/12/2024 16:18, Ard Biesheuvel wrote: > From: Ard Biesheuvel <ardb@kernel.org> > > Currently, the maximum supported physical address space can be > configured as either 48 bits or 52 bits. The only remaining difference > between these in practice is that the former omits the masking and > shifting required to construct TTBR and PTE values, which carry bits #48 > and higher disjoint from the rest of the physical address. > > The overhead of performing these additional calculations is negligible, > and so there is little reason to retain support for two different > configurations, and we can simply support whatever the hardware > supports. I am seeing a boot failure on Apple iPad 7 which uses CONFIG_ARM64_16K=y after this change in linux-next as commit 32d053d6f5e9, with nothing appearing on serial console with earlycon enabled unless I set CONFIG_ARM64_VA_BITS_52=y. Reverting this patch makes the kernel work again. Nick Chan
On Sun, 22 Dec 2024 at 13:05, Nick Chan <towinchenmi@gmail.com> wrote: > > Hi Ard, > > On 12/12/2024 16:18, Ard Biesheuvel wrote: > > From: Ard Biesheuvel <ardb@kernel.org> > > > > Currently, the maximum supported physical address space can be > > configured as either 48 bits or 52 bits. The only remaining difference > > between these in practice is that the former omits the masking and > > shifting required to construct TTBR and PTE values, which carry bits #48 > > and higher disjoint from the rest of the physical address. > > > > The overhead of performing these additional calculations is negligible, > > and so there is little reason to retain support for two different > > configurations, and we can simply support whatever the hardware > > supports. > > I am seeing a boot failure on Apple iPad 7 which uses > CONFIG_ARM64_16K=y after this change in linux-next as commit > 32d053d6f5e9, with nothing appearing on serial console with > earlycon enabled unless I set CONFIG_ARM64_VA_BITS_52=y. Reverting > this patch makes the kernel work again. > Thanks for the report. This is a known issue and has been fixed already in the arm64 tree a couple of days ago. Once linux-next is regenerated, things should start working again.
Hi Ard,
On Thu, Dec 12, 2024 at 09:18:48AM +0100, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
>
> Currently, the maximum supported physical address space can be
> configured as either 48 bits or 52 bits. The only remaining difference
> between these in practice is that the former omits the masking and
> shifting required to construct TTBR and PTE values, which carry bits #48
> and higher disjoint from the rest of the physical address.
>
> The overhead of performing these additional calculations is negligible,
> and so there is little reason to retain support for two different
> configurations, and we can simply support whatever the hardware
> supports.
I am seeing a boot failure after this change as commit 32d053d6f5e9
("arm64/mm: Drop configurable 48-bit physical address space limit") in
next-20241220 with several distribution configurations that all set
ARM64_VA_BITS_48. I can reproduce it on bare metal and in QEMU. Simply:
$ echo 'CONFIG_ARM64_VA_BITS_52=n
CONFIG_ARM64_VA_BITS_48=y' >kernel/configs/repro.config
$ make -skj"$(nproc)" ARCH=arm64 CROSS_COMPILE=aarch64-linux- mrproper defconfig repro.config Image.gz
$ git diff --no-index .config.old .config
diff --git a/.config.old b/.config
index c6dfbacfae06..371145bbe022 100644
--- a/.config.old
+++ b/.config
@@ -290,7 +290,7 @@ CONFIG_MMU=y
CONFIG_ARM64_CONT_PTE_SHIFT=4
CONFIG_ARM64_CONT_PMD_SHIFT=4
CONFIG_ARCH_MMAP_RND_BITS_MIN=18
-CONFIG_ARCH_MMAP_RND_BITS_MAX=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=33
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
CONFIG_STACKTRACE_SUPPORT=y
@@ -304,7 +304,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_SMP=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_PGTABLE_LEVELS=5
+CONFIG_PGTABLE_LEVELS=4
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_ARCH_PROC_KCORE_TEXT=y
CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
@@ -426,7 +426,9 @@ CONFIG_ARM64_4K_PAGES=y
# CONFIG_ARM64_16K_PAGES is not set
# CONFIG_ARM64_64K_PAGES is not set
# CONFIG_ARM64_VA_BITS_39 is not set
-CONFIG_ARM64_VA_BITS=52
+CONFIG_ARM64_VA_BITS_48=y
+# CONFIG_ARM64_VA_BITS_52 is not set
+CONFIG_ARM64_VA_BITS=48
CONFIG_ARM64_LPA2=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
@@ -11259,6 +11261,3 @@ CONFIG_MEMTEST=y
#
# end of Rust hacking
# end of Kernel hacking
-
-CONFIG_ARM64_VA_BITS_52=n
-CONFIG_ARM64_VA_BITS_48=y
$ qemu-system-aarch64 --version | head -1
QEMU emulator version 9.2.0 (qemu-9.2.0-1.fc42)
# With TCG, there is a crash
$ qemu-system-aarch64 \
-display none \
-nodefaults \
-cpu max,pauth-impdef=true \
-machine virt,gic-version=max,virtualization=true \
-append 'console=ttyAMA0 earlycon' \
-kernel arch/arm64/boot/Image.gz \
-initrd rootfs.cpio \
-m 512m -serial mon:stdio
[ 0.000000] Booting Linux on physical CPU 0x0000000000 [0x000f0510]
[ 0.000000] Linux version 6.13.0-rc2-00006-g32d053d6f5e9 (nathan@c3-large-arm64) (aarch64-linux-gcc (GCC) 14.2.0, GNU ld (GNU Binutils) 2.42) #1 SMP PREEMPT Fri Dec 20 23:42:18 UTC 2024
...
[ 0.000000] Unable to handle kernel paging request at virtual address ffff80008001ffe8
[ 0.000000] Mem abort info:
[ 0.000000] ESR = 0x0000000096000004
[ 0.000000] EC = 0x25: DABT (current EL), IL = 32 bits
[ 0.000000] SET = 0, FnV = 0
[ 0.000000] EA = 0, S1PTW = 0
[ 0.000000] FSC = 0x04: level 0 translation fault
[ 0.000000] Data abort info:
[ 0.000000] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000
[ 0.000000] CM = 0, WnR = 0, TnD = 0, TagAccess = 0
[ 0.000000] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
[ 0.000000] swapper pgtable: 4k pages, 48-bit VAs, pgdp=0000000041f10000
[ 0.000000] [ffff80008001ffe8] pgd=0000000000000000, p4d=0000000000000000, pud=1000000043017403, pmd=1000000043018403, pte=006800000800f713
[ 0.000000] Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP
[ 0.000000] Modules linked in:
[ 0.000000] CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.13.0-rc2-00006-g32d053d6f5e9 #1
[ 0.000000] Hardware name: linux,dummy-virt (DT)
[ 0.000000] pstate: 800000c9 (Nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[ 0.000000] pc : readl_relaxed+0x0/0x8
[ 0.000000] lr : gic_validate_dist_version+0x18/0x3c
[ 0.000000] sp : ffffb0df9bf63c90
[ 0.000000] x29: ffffb0df9bf63c90 x28: 0000000000000000 x27: 0000000000000000
[ 0.000000] x26: ffffb0df9bf63d78 x25: 0000000000000008 x24: dead000000000122
[ 0.000000] x23: ffff800080010000 x22: ffffb0df9bf63d88 x21: ffffb0df9bf63d78
[ 0.000000] x20: 0000000000000000 x19: ffff39131ff08a68 x18: 0000000000000001
[ 0.000000] x17: 0000000000000068 x16: 0000000000000100 x15: ffffb0df9b722ee0
[ 0.000000] x14: 0000000000000000 x13: ffff800080021000 x12: ffff80008001ffff
[ 0.000000] x11: 0000000000000000 x10: 0000000008010000 x9 : 0000000008010000
[ 0.000000] x8 : ffff80008001ffff x7 : ffff391303017008 x6 : ffff800080020000
[ 0.000000] x5 : 000000000000003f x4 : 000000000000003f x3 : 0000000000000000
[ 0.000000] x2 : 0000000000000000 x1 : 000000000000ffe8 x0 : ffff80008001ffe8
[ 0.000000] Call trace:
[ 0.000000] readl_relaxed+0x0/0x8 (P)
[ 0.000000] gic_validate_dist_version+0x18/0x3c (L)
[ 0.000000] gic_of_init+0x98/0x278
[ 0.000000] of_irq_init+0x1d4/0x34c
[ 0.000000] irqchip_init+0x18/0x40
[ 0.000000] init_IRQ+0x9c/0xb4
[ 0.000000] start_kernel+0x528/0x6d4
[ 0.000000] __primary_switched+0x88/0x90
[ 0.000000] Code: a8c17bfd d50323bf d65f03c0 d503201f (b9400000)
[ 0.000000] ---[ end trace 0000000000000000 ]---
[ 0.000000] Kernel panic - not syncing: Attempted to kill the idle task!
[ 0.000000] ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]---
# Using KVM, there is just a hang
$ qemu-system-aarch64 \
-display none \
-nodefaults \
-machine virt,gic-version=max \
-append 'console=ttyAMA0 earlycon' \
-kernel arch/arm64/boot/Image.gz \
-initrd rootfs.cpio \
-cpu host \
-enable-kvm \
-m 512m \
-smp 8 \
-serial mon:stdio
Is this a configuration issue? Reverting this change makes everything
work again.
Cheers,
Nathan
On Fri, Dec 20, 2024 at 05:29:06PM -0700, Nathan Chancellor wrote:
> On Thu, Dec 12, 2024 at 09:18:48AM +0100, Ard Biesheuvel wrote:
> > From: Ard Biesheuvel <ardb@kernel.org>
> >
> > Currently, the maximum supported physical address space can be
> > configured as either 48 bits or 52 bits. The only remaining difference
> > between these in practice is that the former omits the masking and
> > shifting required to construct TTBR and PTE values, which carry bits #48
> > and higher disjoint from the rest of the physical address.
> >
> > The overhead of performing these additional calculations is negligible,
> > and so there is little reason to retain support for two different
> > configurations, and we can simply support whatever the hardware
> > supports.
>
> I am seeing a boot failure after this change as commit 32d053d6f5e9
> ("arm64/mm: Drop configurable 48-bit physical address space limit") in
> next-20241220 with several distribution configurations that all set
> ARM64_VA_BITS_48. I can reproduce it on bare metal and in QEMU. Simply:
We dropped the patch yesterday, so hopefully things are better today.
Sorry for the bother and thanks for the report,
Will
Hi,
On 2024-12-12 09:18, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
>
> Currently, the maximum supported physical address space can be
> configured as either 48 bits or 52 bits. The only remaining difference
> between these in practice is that the former omits the masking and
> shifting required to construct TTBR and PTE values, which carry bits #48
> and higher disjoint from the rest of the physical address.
>
> The overhead of performing these additional calculations is negligible,
> and so there is little reason to retain support for two different
> configurations, and we can simply support whatever the hardware
> supports.
>
With this patch (32d053d6f5e92efd82349e7c481cba5a43dc1a22 in
next-20241220), my Raspberry Pi 3 won't boot unless I set it to use
52-bit virtual address space (i.e. neither 39 nor 48 work with a 4 KiB
page size), nothing appears on the serial console. I didn't see anything
suspicious in the kernel log for the 52-bit case but I attached it as I
don't exactly have much else.
I see that 52 bit physical address space previously depended on either
64 KiB pages or 52 bit virtual address space, could that be related?
Please let me know if there's anything else you need.
Regards,
Klara Modin
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> arch/arm64/Kconfig | 31 +-------------------
> arch/arm64/include/asm/assembler.h | 13 ++------
> arch/arm64/include/asm/cpufeature.h | 3 +-
> arch/arm64/include/asm/kvm_pgtable.h | 3 +-
> arch/arm64/include/asm/pgtable-hwdef.h | 6 +---
> arch/arm64/include/asm/pgtable-prot.h | 4 +--
> arch/arm64/include/asm/pgtable.h | 11 +------
> arch/arm64/include/asm/sysreg.h | 6 ----
> arch/arm64/mm/pgd.c | 9 +++---
> arch/arm64/mm/proc.S | 2 --
> scripts/gdb/linux/constants.py.in | 1 -
> tools/arch/arm64/include/asm/sysreg.h | 6 ----
> 12 files changed, 14 insertions(+), 81 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index c1ca21adddc1..7ebd0ba32a32 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -1416,38 +1416,9 @@ config ARM64_VA_BITS
> default 48 if ARM64_VA_BITS_48
> default 52 if ARM64_VA_BITS_52
>
> -choice
> - prompt "Physical address space size"
> - default ARM64_PA_BITS_48
> - help
> - Choose the maximum physical address range that the kernel will
> - support.
> -
> -config ARM64_PA_BITS_48
> - bool "48-bit"
> - depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
> -
> -config ARM64_PA_BITS_52
> - bool "52-bit"
> - depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
> - help
> - Enable support for a 52-bit physical address space, introduced as
> - part of the ARMv8.2-LPA extension.
> -
> - With this enabled, the kernel will also continue to work on CPUs that
> - do not support ARMv8.2-LPA, but with some added memory overhead (and
> - minor performance overhead).
> -
> -endchoice
> -
> -config ARM64_PA_BITS
> - int
> - default 48 if ARM64_PA_BITS_48
> - default 52 if ARM64_PA_BITS_52
> -
> config ARM64_LPA2
> def_bool y
> - depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
> + depends on !ARM64_64K_PAGES
>
> choice
> prompt "Endianness"
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index ad63457a05c5..01a1e3c16283 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -342,14 +342,13 @@ alternative_cb_end
> mrs \tmp0, ID_AA64MMFR0_EL1
> // Narrow PARange to fit the PS field in TCR_ELx
> ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
> - mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
> #ifdef CONFIG_ARM64_LPA2
> alternative_if_not ARM64_HAS_VA52
> mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
> -alternative_else_nop_endif
> -#endif
> cmp \tmp0, \tmp1
> csel \tmp0, \tmp1, \tmp0, hi
> +alternative_else_nop_endif
> +#endif
> bfi \tcr, \tmp0, \pos, #3
> .endm
>
> @@ -599,21 +598,13 @@ alternative_endif
> * ttbr: returns the TTBR value
> */
> .macro phys_to_ttbr, ttbr, phys
> -#ifdef CONFIG_ARM64_PA_BITS_52
> orr \ttbr, \phys, \phys, lsr #46
> and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
> -#else
> - mov \ttbr, \phys
> -#endif
> .endm
>
> .macro phys_to_pte, pte, phys
> -#ifdef CONFIG_ARM64_PA_BITS_52
> orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
> and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
> -#else
> - mov \pte, \phys
> -#endif
> .endm
>
> /*
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index b64e49bd9d10..ed327358e734 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -885,9 +885,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
> * However, by the "D10.1.4 Principles of the ID scheme
> * for fields in ID registers", ARM DDI 0487C.a, any new
> * value is guaranteed to be higher than what we know already.
> - * As a safe limit, we return the limit supported by the kernel.
> */
> - default: return CONFIG_ARM64_PA_BITS;
> + default: return 52;
> }
> }
>
> diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
> index aab04097b505..525aef178cb4 100644
> --- a/arch/arm64/include/asm/kvm_pgtable.h
> +++ b/arch/arm64/include/asm/kvm_pgtable.h
> @@ -30,8 +30,7 @@
>
> static inline u64 kvm_get_parange_max(void)
> {
> - if (kvm_lpa2_is_enabled() ||
> - (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
> + if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
> return ID_AA64MMFR0_EL1_PARANGE_52;
> else
> return ID_AA64MMFR0_EL1_PARANGE_48;
> diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
> index a9136cc551cc..9b34180042b2 100644
> --- a/arch/arm64/include/asm/pgtable-hwdef.h
> +++ b/arch/arm64/include/asm/pgtable-hwdef.h
> @@ -176,7 +176,6 @@
> #define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
>
> #define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
> -#ifdef CONFIG_ARM64_PA_BITS_52
> #ifdef CONFIG_ARM64_64K_PAGES
> #define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
> #define PTE_ADDR_HIGH_SHIFT 36
> @@ -186,7 +185,6 @@
> #define PTE_ADDR_HIGH_SHIFT 42
> #define PHYS_TO_PTE_ADDR_MASK GENMASK_ULL(49, 8)
> #endif
> -#endif
>
> /*
> * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
> @@ -327,12 +325,10 @@
> /*
> * TTBR.
> */
> -#ifdef CONFIG_ARM64_PA_BITS_52
> /*
> - * TTBR_ELx[1] is RES0 in this configuration.
> + * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
> */
> #define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2)
> -#endif
>
> #ifdef CONFIG_ARM64_VA_BITS_52
> /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
> diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
> index a95f1f77bb39..b73acf25341f 100644
> --- a/arch/arm64/include/asm/pgtable-prot.h
> +++ b/arch/arm64/include/asm/pgtable-prot.h
> @@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared;
> #define lpa2_is_enabled() false
> #define PTE_MAYBE_SHARED PTE_SHARED
> #define PMD_MAYBE_SHARED PMD_SECT_S
> -#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
> +#define PHYS_MASK_SHIFT (52)
> #else
> static inline bool __pure lpa2_is_enabled(void)
> {
> @@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void)
>
> #define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
> #define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
> -#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
> +#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? 52 : 48)
> #endif
>
> /*
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 6986345b537a..ec8124d66b9c 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
>
> /*
> - * Macros to convert between a physical address and its placement in a
> + * Helpers to convert between a physical address and its placement in a
> * page table entry, taking care of 52-bit addresses.
> */
> -#ifdef CONFIG_ARM64_PA_BITS_52
> static inline phys_addr_t __pte_to_phys(pte_t pte)
> {
> pte_val(pte) &= ~PTE_MAYBE_SHARED;
> @@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
> {
> return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
> }
> -#else
> -#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
> -#define __phys_to_pte_val(phys) (phys)
> -#endif
>
> #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
> #define pfn_pte(pfn,prot) \
> @@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
> update_mmu_cache_range(NULL, vma, addr, ptep, 1)
> #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
>
> -#ifdef CONFIG_ARM64_PA_BITS_52
> #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
> -#else
> -#define phys_to_ttbr(addr) (addr)
> -#endif
>
> /*
> * On arm64 without hardware Access Flag, copying from user will fail because
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index b8303a83c0bf..f902893ec903 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -916,12 +916,6 @@
> #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
> #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
>
> -#ifdef CONFIG_ARM64_PA_BITS_52
> -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
> -#else
> -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
> -#endif
> -
> #if defined(CONFIG_ARM64_4K_PAGES)
> #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
> #define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
> diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
> index 0c501cabc238..8722ab6d4b1c 100644
> --- a/arch/arm64/mm/pgd.c
> +++ b/arch/arm64/mm/pgd.c
> @@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
>
> void __init pgtable_cache_init(void)
> {
> + unsigned int pgd_size = PGD_SIZE;
> +
> if (pgdir_is_page_size())
> return;
>
> -#ifdef CONFIG_ARM64_PA_BITS_52
> /*
> * With 52-bit physical addresses, the architecture requires the
> * top-level table to be aligned to at least 64 bytes.
> */
> - BUILD_BUG_ON(PGD_SIZE < 64);
> -#endif
> + if (PHYS_MASK_SHIFT >= 52)
> + pgd_size = max(pgd_size, 64);
>
> /*
> * Naturally aligned pgds required by the architecture.
> */
> - pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
> + pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
> SLAB_PANIC, NULL);
> }
> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
> index b8edc5765441..51ed0e9d0a0d 100644
> --- a/arch/arm64/mm/proc.S
> +++ b/arch/arm64/mm/proc.S
> @@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
>
> .macro pte_to_phys, phys, pte
> and \phys, \pte, #PTE_ADDR_LOW
> -#ifdef CONFIG_ARM64_PA_BITS_52
> and \pte, \pte, #PTE_ADDR_HIGH
> orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
> -#endif
> .endm
>
> .macro kpti_mk_tbl_ng, type, num_entries
> diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
> index fd6bd69c5096..05034c0b8fd7 100644
> --- a/scripts/gdb/linux/constants.py.in
> +++ b/scripts/gdb/linux/constants.py.in
> @@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
> LX_CONFIG(CONFIG_ARM64_16K_PAGES)
> LX_CONFIG(CONFIG_ARM64_64K_PAGES)
> if IS_BUILTIN(CONFIG_ARM64):
> - LX_VALUE(CONFIG_ARM64_PA_BITS)
> LX_VALUE(CONFIG_ARM64_VA_BITS)
> LX_VALUE(CONFIG_PAGE_SHIFT)
> LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
> diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
> index cd8420e8c3ad..daeecb1a5366 100644
> --- a/tools/arch/arm64/include/asm/sysreg.h
> +++ b/tools/arch/arm64/include/asm/sysreg.h
> @@ -574,12 +574,6 @@
> #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
> #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
>
> -#ifdef CONFIG_ARM64_PA_BITS_52
> -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
> -#else
> -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
> -#endif
> -
> #if defined(CONFIG_ARM64_4K_PAGES)
> #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
> #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
# bad: [8155b4ef3466f0e289e8fcc9e6e62f3f4dceeac2] Add linux-next specific files for 20241220
# good: [8faabc041a001140564f718dabe37753e88b37fa] Merge tag 'net-6.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
git bisect start 'next/master' 'next/stable'
# bad: [d711d1b348a1574a2c24872512067d190b63fd68] Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
git bisect bad d711d1b348a1574a2c24872512067d190b63fd68
# bad: [3aa602263e025bb42ca8766a16bceff287a8f0ee] Merge branch 'xtensa-for-next' of git://github.com/jcmvbkbc/linux-xtensa.git
git bisect bad 3aa602263e025bb42ca8766a16bceff287a8f0ee
# good: [c4262cd734f8695b217332ed2ca7237a7e753b62] Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
git bisect good c4262cd734f8695b217332ed2ca7237a7e753b62
# bad: [a03c7cb185a0648d4f67fb63acf4b98b6fe8d0f7] Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mediatek/linux.git
git bisect bad a03c7cb185a0648d4f67fb63acf4b98b6fe8d0f7
# bad: [2086880948dccced5110e472a99915913cec1b8b] Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/amlogic/linux.git
git bisect bad 2086880948dccced5110e472a99915913cec1b8b
# good: [e7bb49e3f6435ff3611b83f78a61d387f24d80f8] perf x86: Define arch_fetch_insn in NO_AUXTRACE builds
git bisect good e7bb49e3f6435ff3611b83f78a61d387f24d80f8
# bad: [7bdd902c162d5576785095a0f8885df84bb472f5] Merge branch 'for-next/core' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
git bisect bad 7bdd902c162d5576785095a0f8885df84bb472f5
# bad: [d6ab634f1b323db6639b8b776f5d95ae747b342a] Merge branches 'for-next/cpufeature', 'for-next/docs', 'for-next/misc' and 'for-next/mm' into for-next/core
git bisect bad d6ab634f1b323db6639b8b776f5d95ae747b342a
# bad: [32d053d6f5e92efd82349e7c481cba5a43dc1a22] arm64/mm: Drop configurable 48-bit physical address space limit
git bisect bad 32d053d6f5e92efd82349e7c481cba5a43dc1a22
# good: [f0da16992aef7e246b2f3bba1492e3a52c38ca0e] arm64/kvm: Configure HYP TCR.PS/DS based on host stage1
git bisect good f0da16992aef7e246b2f3bba1492e3a52c38ca0e
# good: [92b6919d7fb29691a8bc5aca49044056683542ca] arm64: Kconfig: force ARM64_PAN=y when enabling TTBR0 sw PAN
git bisect good 92b6919d7fb29691a8bc5aca49044056683542ca
# first bad commit: [32d053d6f5e92efd82349e7c481cba5a43dc1a22] arm64/mm: Drop configurable 48-bit physical address space limit
On Sat, 21 Dec 2024 at 00:39, Klara Modin <klarasmodin@gmail.com> wrote: > > Hi, > > On 2024-12-12 09:18, Ard Biesheuvel wrote: > > From: Ard Biesheuvel <ardb@kernel.org> > > > > Currently, the maximum supported physical address space can be > > configured as either 48 bits or 52 bits. The only remaining difference > > between these in practice is that the former omits the masking and > > shifting required to construct TTBR and PTE values, which carry bits #48 > > and higher disjoint from the rest of the physical address. > > > > The overhead of performing these additional calculations is negligible, > > and so there is little reason to retain support for two different > > configurations, and we can simply support whatever the hardware > > supports. > > > > With this patch (32d053d6f5e92efd82349e7c481cba5a43dc1a22 in > next-20241220), my Raspberry Pi 3 won't boot unless I set it to use > 52-bit virtual address space (i.e. neither 39 nor 48 work with a 4 KiB > page size), nothing appears on the serial console. I didn't see anything > suspicious in the kernel log for the 52-bit case but I attached it as I > don't exactly have much else. > > I see that 52 bit physical address space previously depended on either > 64 KiB pages or 52 bit virtual address space, could that be related? > > Please let me know if there's anything else you need. > Thanks for the report. This patch has already been dropped from the arm64 tree, so it should be gone from linux-next once it gets regenerated.
© 2016 - 2025 Red Hat, Inc.