From: Ard Biesheuvel <ardb@kernel.org>

Now that the vmemmap region is sized dynamically based on the actual
size of the kernel VA space, there are no longer any material
differences between supporting 48-bit and 52-bit VA space sizes for 64k
pages, which use the same number of translation levels. And if needed,
52-bit virtual addressing can be disabled at boot on systems that do
support it but where 48-bit virtual addressing is preferred.

The only remaining difference is the size of a root level user page
table, which grows from 512 bytes to 8k when 52-bit virtual addressing
is enabled. But given that both sizes are smaller than a page, this is
easily fixed up in the pgd_cache init code. (In all other possible
cases where vabits_actual < VA_BITS holds, the effective PGD_SIZE
equals the page size, and so this change has no effect.)

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/Kconfig  | 5 +++--
 arch/arm64/mm/pgd.c | 9 +++++----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ac8e7550430b..6a73fd61b4aa 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -379,11 +379,11 @@ config PGTABLE_LEVELS
int
default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
- default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
+ default 3 if ARM64_64K_PAGES
default 4 if ARM64_16K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
- default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+ default 4 if ARM64_VA_BITS_48
default 5 if ARM64_4K_PAGES && ARM64_VA_BITS_52

config ARCH_SUPPORTS_UPROBES
@@ -1361,6 +1361,7 @@ config ARM64_VA_BITS_47
config ARM64_VA_BITS_48
bool "48-bit"
+ depends on !PAGE_SIZE_64KB

config ARM64_VA_BITS_52
bool "52-bit"
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 0c501cabc238..ecc4b1ec235c 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
void __init pgtable_cache_init(void)
{
+ unsigned int size = PGD_SIZE >> (VA_BITS - vabits_actual);
+
if (pgdir_is_page_size())
return;

-#ifdef CONFIG_ARM64_PA_BITS_52
/*
* With 52-bit physical addresses, the architecture requires the
* top-level table to be aligned to at least 64 bytes.
*/
- BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+ if (IS_ENABLED(CONFIG_ARM64_PA_BITS_52))
+ size = max(size, 64);

/*
* Naturally aligned pgds required by the architecture.
*/
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ pgd_cache = kmem_cache_create("pgd_cache", size, size,
SLAB_PANIC, NULL);
}
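
For illustration (not part of the patch), the sizes the new expression
computes on a 64k-granule kernel built with VA_BITS = 52, i.e. with
PGD_SIZE = 8192:

    /* vabits_actual == 52:  8192 >> (52 - 52) == 8192 bytes
     * vabits_actual == 48:  8192 >> (52 - 48) ==  512 bytes
     *
     * The max(size, 64) floor carries the 64-byte alignment rule that
     * the removed BUILD_BUG_ON() asserted at build time over to the
     * size now computed at runtime.
     */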
--
2.47.0.163.g1226f6d8fa-goog