From nobody Sun Feb 8 10:30:07 2026
From: Yeoreum Yun
To: catalin.marinas@arm.com, will@kernel.org, ryan.roberts@arm.com,
	akpm@linux-foundation.org, david@kernel.org, kevin.brodsky@arm.com,
	quic_zhenhuah@quicinc.com, dev.jain@arm.com, yang@os.amperecomputing.com,
	chaitanyas.prakash@arm.com, bigeasy@linutronix.de, clrkwllms@kernel.org,
	rostedt@goodmis.org, lorenzo.stoakes@oracle.com, ardb@kernel.org,
	jackmanb@google.com, vbabka@suse.cz
Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
	linux-rt-devel@lists.linux.dev, Yeoreum Yun
Subject: [PATCH v2 1/2] arm64: mmu: don't allocate pages while splitting linear mapping
Date: Wed, 17 Dec 2025 18:20:06 +0000
Message-Id: <20251217182007.2345700-2-yeoreum.yun@arm.com>
In-Reply-To: <20251217182007.2345700-1-yeoreum.yun@arm.com>
References: <20251217182007.2345700-1-yeoreum.yun@arm.com>

Currently, linear_map_split_to_ptes() allocates page tables with GFP_ATOMIC
while splitting the linear mapping to PTEs under stop_machine().

This is fine in the non-PREEMPT_RT case. However, it becomes a problem under
PREEMPT_RT because generic memory allocation/free APIs (e.g. pgtable_alloc(),
__get_free_pages(), etc.) cannot be invoked in a non-preemptible context,
except for the *_nolock() variants: the generic allocators may sleep because
they take spin_lock().

In other words, calling pgtable_alloc(), even with GFP_ATOMIC, is not allowed
in linear_map_split_to_ptes() when it is executed by the stopper thread,
where preemption is disabled under PREEMPT_RT.

To address this, divide linear_map_maybe_split_to_ptes() into three phases
(sketched in the example below):
  - collect the number of pages required for the split.
  - allocate the required number of pages.
  - split the linear map using the pre-allocated pages.
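For illustration only, and not part of the patch: the same collect /
pre-allocate / consume flow reduced to a self-contained user-space C sketch.
Every name below (count_work_items(), do_one_item(), etc.) is made up; in the
real code, phase 1 and phase 3 reuse the same page-table walk
(linear_map_split_to_ptes(), selected by split_args->mode) under
stop_machine(), and phase 2 uses alloc_pages_bulk() in normal, preemptible
context.

  #include <stdlib.h>

  #define ITEM_SZ 64

  /* Phase 1 stand-in: count how much scratch memory the work will need. */
  static unsigned long count_work_items(void)
  {
          return 8;
  }

  /* Phase 3 stand-in: one unit of work that may only use pre-allocated memory. */
  static void do_one_item(unsigned long i, void *buf)
  {
          (void)i;
          (void)buf;
  }

  int main(void)
  {
          unsigned long nr, i;
          void **pool;

          /* Phase 1: walk once, only counting. */
          nr = count_work_items();

          /* Phase 2: allocate everything up front, while sleeping is still allowed. */
          pool = calloc(nr, sizeof(*pool));
          if (!pool)
                  return 1;
          for (i = 0; i < nr; i++) {
                  pool[i] = malloc(ITEM_SZ);
                  if (!pool[i])
                          return 1;
          }

          /* Phase 3: do the work from the pool only (the "no allocation" region). */
          for (i = 0; i < nr; i++)
                  do_one_item(i, pool[i]);

          for (i = 0; i < nr; i++)
                  free(pool[i]);
          free(pool);
          return 0;
  }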
Fixes: 3df6979d222b ("arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs")
Signed-off-by: Yeoreum Yun
---
 arch/arm64/mm/mmu.c | 213 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 170 insertions(+), 43 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9ae7ce00a7ef..e4e6c7e0a016 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -527,18 +527,28 @@ static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	panic("Failed to create page tables\n");
 }
 
-static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
-				       enum pgtable_type pgtable_type)
-{
-	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
-	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
-	phys_addr_t pa;
-
-	if (!ptdesc)
-		return INVALID_PHYS_ADDR;
+enum split_modes {
+	SPLIT_ALLOC,
+	SPLIT_PREALLOC,
+	SPLIT_COLLECT,
+};
 
-	pa = page_to_phys(ptdesc_page(ptdesc));
+struct split_args {
+	enum split_modes mode;
+	union {
+		struct {
+			unsigned long nr;
+			unsigned long i;
+			struct page **pages;
+		};
+		gfp_t gfp;
+	};
+};
 
+static __always_inline void __pgd_pgtable_init(struct mm_struct *mm,
+					       struct ptdesc *ptdesc,
+					       enum pgtable_type pgtable_type)
+{
 	switch (pgtable_type) {
 	case TABLE_PTE:
 		BUG_ON(!pagetable_pte_ctor(mm, ptdesc));
@@ -554,19 +564,56 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
 		break;
 	}
 
+}
+
+static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
+				       enum pgtable_type pgtable_type)
+{
+	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
+	struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0);
+	phys_addr_t pa;
+
+	if (!ptdesc)
+		return INVALID_PHYS_ADDR;
+
+	pa = page_to_phys(ptdesc_page(ptdesc));
+
+	__pgd_pgtable_init(mm, ptdesc, pgtable_type);
+
 	return pa;
 }
 
+static phys_addr_t __pgd_pgtable_prealloc(struct mm_struct *mm,
+					  struct split_args *split_args,
+					  enum pgtable_type pgtable_type)
+{
+	struct page *page;
+
+	BUG_ON(split_args->i >= split_args->nr);
+
+	page = split_args->pages[split_args->i++];
+	if (!page)
+		return INVALID_PHYS_ADDR;
+
+	__pgd_pgtable_init(mm, page_ptdesc(page), pgtable_type);
+
+	return page_to_phys(page);
+}
+
 static phys_addr_t
-pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp)
+pgd_pgtable_alloc_split(enum pgtable_type pgtable_type,
+			struct split_args *split_args)
 {
-	return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type);
+	if (split_args->mode == SPLIT_ALLOC)
+		return __pgd_pgtable_alloc(&init_mm, split_args->gfp, pgtable_type);
+
+	return __pgd_pgtable_prealloc(&init_mm, split_args, pgtable_type);
 }
 
 static phys_addr_t __maybe_unused
 pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type)
 {
-	return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL);
+	return __pgd_pgtable_alloc(&init_mm, GFP_PGTABLE_KERNEL, pgtable_type);
 }
 
 static phys_addr_t
@@ -584,7 +631,9 @@ static void split_contpte(pte_t *ptep)
 	__set_pte(ptep, pte_mknoncont(__ptep_get(ptep)));
 }
 
-static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
+static int split_pmd(pmd_t *pmdp, pmd_t pmd,
+		     struct split_args *split_args,
+		     bool to_cont)
 {
 	pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
 	unsigned long pfn = pmd_pfn(pmd);
@@ -593,7 +642,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont)
 	pte_t *ptep;
 	int i;
 
-	pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp);
+	pte_phys = pgd_pgtable_alloc_split(TABLE_PTE, split_args);
 	if (pte_phys == INVALID_PHYS_ADDR)
 		return -ENOMEM;
 	ptep = (pte_t *)phys_to_virt(pte_phys);
@@ -628,7 +677,9 @@ static void split_contpmd(pmd_t *pmdp)
 	set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp)));
 }
 
-static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
+static int split_pud(pud_t *pudp, pud_t pud,
+		     struct split_args *split_args,
+		     bool to_cont)
 {
 	pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
 	unsigned int step = PMD_SIZE >> PAGE_SHIFT;
@@ -638,7 +689,7 @@ static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont)
 	pmd_t *pmdp;
 	int i;
 
-	pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp);
+	pmd_phys = pgd_pgtable_alloc_split(TABLE_PMD, split_args);
 	if (pmd_phys == INVALID_PHYS_ADDR)
 		return -ENOMEM;
 	pmdp = (pmd_t *)phys_to_virt(pmd_phys);
@@ -672,6 +723,10 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
 	pmd_t *pmdp, pmd;
 	pte_t *ptep, pte;
 	int ret = 0;
+	struct split_args split_args = {
+		.mode = SPLIT_ALLOC,
+		.gfp = GFP_PGTABLE_KERNEL,
+	};
 
 	/*
 	 * PGD: If addr is PGD aligned then addr already describes a leaf
@@ -707,7 +762,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
 		if (!pud_present(pud))
 			goto out;
 		if (pud_leaf(pud)) {
-			ret = split_pud(pudp, pud, GFP_PGTABLE_KERNEL, true);
+			ret = split_pud(pudp, pud, &split_args, true);
 			if (ret)
 				goto out;
 		}
@@ -732,7 +787,7 @@ static int split_kernel_leaf_mapping_locked(unsigned long addr)
 		 */
 		if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
 			goto out;
-		ret = split_pmd(pmdp, pmd, GFP_PGTABLE_KERNEL, true);
+		ret = split_pmd(pmdp, pmd, &split_args, true);
 		if (ret)
 			goto out;
 	}
@@ -831,12 +886,17 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
 static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
 				   unsigned long next, struct mm_walk *walk)
 {
-	gfp_t gfp = *(gfp_t *)walk->private;
+	struct split_args *split_args = (struct split_args *)walk->private;
 	pud_t pud = pudp_get(pudp);
 	int ret = 0;
 
-	if (pud_leaf(pud))
-		ret = split_pud(pudp, pud, gfp, false);
+	if (!pud_leaf(pud))
+		return 0;
+
+	if (split_args->mode == SPLIT_COLLECT)
+		split_args->nr++;
+	else
+		ret = split_pud(pudp, pud, split_args, false);
 
 	return ret;
 }
@@ -844,22 +904,29 @@ static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr,
 static int split_to_ptes_pmd_entry(pmd_t *pmdp, unsigned long addr,
 				   unsigned long next, struct mm_walk *walk)
 {
-	gfp_t gfp = *(gfp_t *)walk->private;
+	struct split_args *split_args = (struct split_args *)walk->private;
 	pmd_t pmd = pmdp_get(pmdp);
-	int ret = 0;
+	int ret;
 
-	if (pmd_leaf(pmd)) {
-		if (pmd_cont(pmd))
-			split_contpmd(pmdp);
-		ret = split_pmd(pmdp, pmd, gfp, false);
+	if (!pmd_leaf(pmd))
+		return 0;
 
-		/*
-		 * We have split the pmd directly to ptes so there is no need to
-		 * visit each pte to check if they are contpte.
-		 */
-		walk->action = ACTION_CONTINUE;
+	if (split_args->mode == SPLIT_COLLECT) {
+		split_args->nr++;
+		return 0;
 	}
 
+	if (pmd_cont(pmd))
+		split_contpmd(pmdp);
+	ret = split_pmd(pmdp, pmd, split_args, false);
+
+	/*
+	 * We have split the pmd directly to ptes so there is no need to
+	 * visit each pte to check if they are contpte.
+	 */
+	walk->action = ACTION_CONTINUE;
+
+	return ret;
 }
 
@@ -880,13 +947,15 @@ static const struct mm_walk_ops split_to_ptes_ops = {
 	.pte_entry = split_to_ptes_pte_entry,
 };
 
-static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp)
+static int range_split_to_ptes(unsigned long start, unsigned long end,
+			       struct split_args *split_args)
 {
 	int ret;
 
 	arch_enter_lazy_mmu_mode();
 	ret = walk_kernel_page_table_range_lockless(start, end,
-						    &split_to_ptes_ops, NULL, &gfp);
+						    &split_to_ptes_ops, NULL,
+						    split_args);
 	arch_leave_lazy_mmu_mode();
 
 	return ret;
@@ -903,7 +972,7 @@ static void __init init_idmap_kpti_bbml2_flag(void)
 	smp_mb();
 }
 
-static int __init linear_map_split_to_ptes(void *__unused)
+static int __init linear_map_split_to_ptes(void *data)
 {
 	/*
 	 * Repainting the linear map must be done by CPU0 (the boot CPU) because
@@ -911,6 +980,7 @@ static int __init linear_map_split_to_ptes(void *__unused)
 	 * be held in a waiting area with the idmap active.
 	 */
 	if (!smp_processor_id()) {
+		struct split_args *split_args = data;
 		unsigned long lstart = _PAGE_OFFSET(vabits_actual);
 		unsigned long lend = PAGE_END;
 		unsigned long kstart = (unsigned long)lm_alias(_stext);
@@ -928,12 +998,13 @@ static int __init linear_map_split_to_ptes(void *__unused)
 		 * PTE. The kernel alias remains static throughout runtime so
 		 * can continue to be safely mapped with large mappings.
 		 */
-		ret = range_split_to_ptes(lstart, kstart, GFP_ATOMIC);
+		ret = range_split_to_ptes(lstart, lend, split_args);
 		if (!ret)
-			ret = range_split_to_ptes(kend, lend, GFP_ATOMIC);
+			ret = range_split_to_ptes(kstart, kend, split_args);
 		if (ret)
 			panic("Failed to split linear map\n");
-		flush_tlb_kernel_range(lstart, lend);
+		if (split_args->mode != SPLIT_COLLECT)
+			flush_tlb_kernel_range(lstart, lend);
 
 		/*
 		 * Relies on dsb in flush_tlb_kernel_range() to avoid reordering
@@ -963,10 +1034,61 @@ static int __init linear_map_split_to_ptes(void *__unused)
 
 void __init linear_map_maybe_split_to_ptes(void)
 {
-	if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort()) {
-		init_idmap_kpti_bbml2_flag();
-		stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask);
+	struct page **pages = NULL;
+	struct split_args split_args;
+	int order;
+	int nr_populated;
+	int err;
+
+	if (!linear_map_requires_bbml2 || system_supports_bbml2_noabort())
+		return;
+
+	/*
+	 * Phase 1: Collect required extra pages to split.
+	 */
+	split_args.mode = SPLIT_COLLECT;
+	split_args.nr = 0;
+
+	init_idmap_kpti_bbml2_flag();
+	stop_machine(linear_map_split_to_ptes, &split_args, cpu_online_mask);
+
+	/*
+	 * Phase 2: Allocate necessary pages to split.
+	 */
+	if (split_args.nr == 0) {
+		err = 0;
+		split_args.mode = SPLIT_ALLOC;
+	} else {
+		err = -ENOMEM;
+		order = order_base_2(PAGE_ALIGN(split_args.nr *
+					sizeof(struct page *)) >> PAGE_SHIFT);
+
+		pages = (struct page **)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+		if (!pages)
+			goto error;
+
+		nr_populated = alloc_pages_bulk(GFP_ATOMIC | __GFP_ZERO, split_args.nr, pages);
+		if (nr_populated < split_args.nr)
+			goto error;
+
+		err = 0;
+		split_args.mode = SPLIT_PREALLOC;
+		split_args.i = 0;
+		split_args.pages = pages;
 	}
+
+	/*
+	 * Phase 3: Split linear map.
+	 */
+	init_idmap_kpti_bbml2_flag();
+	stop_machine(linear_map_split_to_ptes, &split_args, cpu_online_mask);
+
+error:
+	if (pages)
+		free_pages((unsigned long)pages, order);
+
+	if (err)
+		panic("Failed to split linear map: %d\n", err);
 }
 
 /*
@@ -1087,6 +1209,11 @@ bool arch_kfence_init_pool(void)
 	unsigned long start = (unsigned long)__kfence_pool;
 	unsigned long end = start + KFENCE_POOL_SIZE;
 	int ret;
+	struct split_args split_args = {
+		.mode = SPLIT_ALLOC,
+		.gfp = GFP_PGTABLE_KERNEL,
+	};
+
 
 	/* Exit early if we know the linear map is already pte-mapped. */
 	if (!split_leaf_mapping_possible())
@@ -1097,7 +1224,7 @@ bool arch_kfence_init_pool(void)
 		return true;
 
 	mutex_lock(&pgtable_split_lock);
-	ret = range_split_to_ptes(start, end, GFP_PGTABLE_KERNEL);
+	ret = range_split_to_ptes(start, end, &split_args);
 	mutex_unlock(&pgtable_split_lock);
 
 	/*
-- 

From nobody Sun Feb 8 10:30:07 2026
From: Yeoreum Yun
To: catalin.marinas@arm.com, will@kernel.org, ryan.roberts@arm.com,
	akpm@linux-foundation.org, david@kernel.org, kevin.brodsky@arm.com,
	quic_zhenhuah@quicinc.com, dev.jain@arm.com, yang@os.amperecomputing.com,
	chaitanyas.prakash@arm.com, bigeasy@linutronix.de, clrkwllms@kernel.org,
	rostedt@goodmis.org, lorenzo.stoakes@oracle.com, ardb@kernel.org,
	jackmanb@google.com, vbabka@suse.cz
Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
	linux-rt-devel@lists.linux.dev, Yeoreum Yun
Subject: [PATCH v2 2/2] arm64: mmu: avoid allocating pages while installing nG mappings for KPTI
Date: Wed, 17 Dec 2025 18:20:07 +0000
Message-Id: <20251217182007.2345700-3-yeoreum.yun@arm.com>
In-Reply-To: <20251217182007.2345700-1-yeoreum.yun@arm.com>
References: <20251217182007.2345700-1-yeoreum.yun@arm.com>
The current __kpti_install_ng_mappings() allocates a temporary PGD while
installing the nG mappings for KPTI under stop_machine(), using GFP_ATOMIC.

This is fine in the non-PREEMPT_RT case. However, it becomes a problem under
PREEMPT_RT because generic memory allocation/free APIs (e.g., pgtable_alloc(),
__get_free_pages(), etc.) cannot be invoked in a non-preemptible context,
except for the *_nolock() variants. These generic allocators may sleep due to
their use of spin_lock().

In other words, calling __get_free_pages(), even with GFP_ATOMIC, is not
allowed in __kpti_install_ng_mappings(), which is executed by the stopper
thread where preemption is disabled under PREEMPT_RT.

To address this, preallocate the pages needed for the temporary PGD before
invoking __kpti_install_ng_mappings() via stop_machine().

Fixes: 47546a1912fc ("arm64: mm: install KPTI nG mappings with MMU enabled")
Signed-off-by: Yeoreum Yun
Reviewed-by: Ryan Roberts
---
 arch/arm64/mm/mmu.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e4e6c7e0a016..69d9651de0cd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1360,7 +1360,7 @@ static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
 	return kpti_ng_temp_alloc;
 }
 
-static int __init __kpti_install_ng_mappings(void *__unused)
+static int __init __kpti_install_ng_mappings(void *data)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -1368,10 +1368,9 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 
 	int cpu = smp_processor_id();
 	int levels = CONFIG_PGTABLE_LEVELS;
-	int order = order_base_2(levels);
 	u64 kpti_ng_temp_pgd_pa = 0;
 	pgd_t *kpti_ng_temp_pgd;
-	u64 alloc = 0;
+	u64 alloc = *(u64 *)data;
 
 	if (levels == 5 && !pgtable_l5_enabled())
 		levels = 4;
@@ -1382,8 +1381,6 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 
 	if (!cpu) {
 		int ret;
-
-		alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
 		kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
 		kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
 
@@ -1414,16 +1411,17 @@ static int __init __kpti_install_ng_mappings(void *__unused)
 	remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
 	cpu_uninstall_idmap();
 
-	if (!cpu) {
-		free_pages(alloc, order);
+	if (!cpu)
 		arm64_use_ng_mappings = true;
-	}
 
 	return 0;
 }
 
 void __init kpti_install_ng_mappings(void)
 {
+	int order = order_base_2(CONFIG_PGTABLE_LEVELS);
+	u64 alloc;
+
 	/* Check whether KPTI is going to be used */
 	if (!arm64_kernel_unmapped_at_el0())
 		return;
@@ -1436,8 +1434,14 @@ void __init kpti_install_ng_mappings(void)
 	if (arm64_use_ng_mappings)
 		return;
 
+	alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+	if (!alloc)
+		panic("Failed to alloc page tables\n");
+
 	init_idmap_kpti_bbml2_flag();
-	stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
+	stop_machine(__kpti_install_ng_mappings, &alloc, cpu_online_mask);
+
+	free_pages(alloc, order);
 }
 
 static pgprot_t __init kernel_exec_prot(void)
-- 
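For illustration only, and not part of the series: the pre-allocate /
hand-over / free flow that patch 2/2 adopts, reduced to a runnable user-space
C sketch. All names here (no_alloc_callback() and the 4 KiB buffer size) are
hypothetical; in the patch the callback is __kpti_install_ng_mappings() run
via stop_machine(), the buffer holds the temporary page tables, and the
allocation and free both happen back in the preemptible caller.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Stand-in for the region where allocation is forbidden
   * (the stop_machine() callback in the patch). It only consumes
   * memory handed to it through the data pointer. */
  static int no_alloc_callback(void *data)
  {
          char *buf = *(char **)data;

          memset(buf, 0, 4096);
          return 0;
  }

  int main(void)
  {
          /* Allocate up front, while allocation is still permitted. */
          char *buf = malloc(4096);

          if (!buf) {
                  fprintf(stderr, "allocation failed\n");
                  return 1;
          }

          /* Enter the no-allocation section with the buffer's address. */
          no_alloc_callback(&buf);

          /* Clean up back in the normal context. */
          free(buf);
          return 0;
  }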