From: "Hou Wenlong" <houwenlong.hwl@antgroup.com>
To: linux-kernel@vger.kernel.org
Cc: "Thomas Garnier", "Lai Jiangshan", "Kees Cook", "Hou Wenlong",
	"Thomas Gleixner", "Ingo Molnar", "Borislav Petkov", "Dave Hansen",
	"H. Peter Anvin", "Juergen Gross", "Anshuman Khandual",
	"Mike Rapoport", "Josh Poimboeuf", "Pasha Tatashin"
Subject: [PATCH RFC 32/43] x86/boot/64: Use data relocation to get absolute address when PIE is enabled
Date: Fri, 28 Apr 2023 17:51:12 +0800
X-Mailer: git-send-email 2.31.1

When PIE is enabled, all symbol references are RIP-relative, so there
is no need to fix up global symbol references when running at the low
address. However, in order to obtain the absolute virtual address of a
symbol, introduce a macro that uses a data relocation to get it.

Suggested-by: Lai Jiangshan
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Cc: Thomas Garnier
Cc: Kees Cook
---
 arch/x86/kernel/head64.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 49f7629b17f7..ef7ad96f2154 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -86,10 +86,22 @@ static struct desc_ptr startup_gdt_descr = {
 
 #define __head	__section(".head.text")
 
+#ifdef CONFIG_X86_PIE
+#define SYM_ABS_VAL(sym) \
+	({ static unsigned long __initdata __##sym = (unsigned long)sym; __##sym; })
+
+static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
+{
+	return ptr;
+}
+#else
+#define SYM_ABS_VAL(sym) ((unsigned long)sym)
+
 static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
 {
 	return ptr - (void *)_text + (void *)physaddr;
 }
+#endif /* CONFIG_X86_PIE */
 
 static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
 {
@@ -142,8 +154,8 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 	 * attribute.
 	 */
 	if (sme_get_me_mask()) {
-		vaddr = (unsigned long)__start_bss_decrypted;
-		vaddr_end = (unsigned long)__end_bss_decrypted;
+		vaddr = SYM_ABS_VAL(__start_bss_decrypted);
+		vaddr_end = SYM_ABS_VAL(__end_bss_decrypted);
 
 		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
 			/*
@@ -189,6 +201,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	bool la57;
 	int i;
 	unsigned int *next_pgt_ptr;
+	unsigned long text_base = SYM_ABS_VAL(_text);
+	unsigned long end_base = SYM_ABS_VAL(_end);
 
 	la57 = check_la57_support(physaddr);
 
@@ -200,7 +214,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * Compute the delta between the address I am compiled to run at
 	 * and the address I am actually running at.
	 */
-	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+	load_delta = physaddr - (text_base - __START_KERNEL_map);
 
 	/* Is the address not 2M aligned? */
 	if (load_delta & ~PMD_MASK)
@@ -214,9 +228,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pgd = fixup_pointer(&early_top_pgt, physaddr);
 	p = pgd + pgd_index(__START_KERNEL_map);
 	if (la57)
-		*p = (unsigned long)level4_kernel_pgt;
+		*p = SYM_ABS_VAL(level4_kernel_pgt);
 	else
-		*p = (unsigned long)level3_kernel_pgt;
+		*p = SYM_ABS_VAL(level3_kernel_pgt);
 	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;
 
 	if (la57) {
@@ -273,7 +287,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pmd_entry += sme_get_me_mask();
 	pmd_entry += physaddr;
 
-	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+	for (i = 0; i < DIV_ROUND_UP(end_base - text_base, PMD_SIZE); i++) {
 		int idx = i + (physaddr >> PMD_SHIFT);
 
 		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
@@ -298,11 +312,11 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
 
 	/* invalidate pages before the kernel image */
-	for (i = 0; i < pmd_index((unsigned long)_text); i++)
+	for (i = 0; i < pmd_index(text_base); i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
 	/* fixup pages that are part of the kernel image */
-	for (; i <= pmd_index((unsigned long)_end); i++)
+	for (; i <= pmd_index(end_base); i++)
 		if (pmd[i] & _PAGE_PRESENT)
 			pmd[i] += load_delta;
 
-- 
2.31.1
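
Note (editorial illustration, not part of the patch): below is a minimal
userspace sketch of the data-relocation trick that SYM_ABS_VAL() relies on.
The names (my_symbol, sym_abs_val, addr_via_rip) are made up for the example
only; it just shows how an initialized static variable receives the symbol's
address via a 64-bit data relocation instead of a RIP-relative instruction.

/*
 * Minimal sketch of the SYM_ABS_VAL() idea; names are illustrative.
 * Build as a position-independent executable, e.g.:
 *   gcc -O2 -fpie -pie sketch.c -o sketch
 */
#include <stdio.h>

static int my_symbol;	/* stand-in for a kernel symbol such as _text */

/*
 * Taking the address in code: under PIE the compiler emits a RIP-relative
 * LEA, so the result reflects wherever the code is currently running.
 */
static unsigned long addr_via_rip(void)
{
	return (unsigned long)&my_symbol;
}

/*
 * SYM_ABS_VAL()-style: the address is stored in an initialized static
 * variable, so it is filled in through a data relocation rather than
 * computed at run time.  In the kernel's early boot path that relocation
 * has already been applied against the link-time (high) virtual address,
 * so reading the variable yields the absolute kernel virtual address even
 * while executing from the low identity mapping.  In this userspace
 * program the dynamic loader applies the relocation at the actual load
 * address, so both helpers print the same value here.
 */
#define sym_abs_val(sym) \
	({ static unsigned long __abs_##sym = (unsigned long)&sym; __abs_##sym; })

int main(void)
{
	printf("RIP-relative address:    %#lx\n", addr_via_rip());
	printf("data-relocation address: %#lx\n", sym_abs_val(my_symbol));
	return 0;
}

In the kernel version the hidden variable is marked __initdata, so the
per-symbol storage is discarded along with the rest of the init data once
boot is complete.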