From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CE09AC433EF for ; Mon, 16 May 2022 13:17:13 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S241023AbiEPNRL (ORCPT ); Mon, 16 May 2022 09:17:11 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36812 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S241306AbiEPNRD (ORCPT ); Mon, 16 May 2022 09:17:03 -0400 Received: from mail-pl1-x632.google.com (mail-pl1-x632.google.com [IPv6:2607:f8b0:4864:20::632]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7069D24596 for ; Mon, 16 May 2022 06:17:02 -0700 (PDT) Received: by mail-pl1-x632.google.com with SMTP id q7so2992000plx.3 for ; Mon, 16 May 2022 06:17:02 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=jZmFtLhq9vgJsY5ZvagaghPeiw7jxmPgPePzPZmM8XU=; b=jFjBVxlpFTvfFJLCXo6/txw0+pGzSPh7KYZJLqbjH1nJx0D2pWAxclCbsJbAOALeWy ey4NE3CkBDrzT3YkCXNEA2ruu/Oxs2bl3jy5jXAqtCTdL7cd2hO1YblcMrfqTI0NmIts f5kTRHrnetbfvfbpGSzqOYcNG1RVutKa1PW9If2gy6qO9plyS41MM4QX2GhZgDTsDxld xXTurcC5tmBWdQO2aNB4y81nV7WSg9c8sOMJZoe6W6PdXNMX/m6EP06Wnls352go2VPu pNYcimlePpGexVPj96n/OMCR0fV15GA4qkdBQxJqs3w/DQXIfSll9Z9r+p5FkCzcy+6u Jq5g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=jZmFtLhq9vgJsY5ZvagaghPeiw7jxmPgPePzPZmM8XU=; b=0JKXQ0vi1og7s8eeaqxehyvneQnwhyPrDMyw0urQAbLb9okMV5EN6cViJW5T96zFWC AQJOnMWLKQefEKDRlqE5q/SNw4Q7a5d/geZCbvtt7BHeDXYBeFji3RxnhWkxc89wUfcX 
B49BtxMlZp4ELaP1XrvrjcIMoZSNT7YR5g8s5AzuJne45glbVJDpnylJZ5K+RiV8IFuE ZJ+pxU5/0K9xzXlKWCVO2wxzQR4T44x3GUvqEN+qBY+fE1HBA93o/EsZAgT2D9CLCc+q ZClp5GzymeOgzxG4eP4xsenORhg/HCrqmQTwPZuymSMu8UQGyuFFN3DSSY70HDdhU8VG W/YQ== X-Gm-Message-State: AOAM532E9e09FJyVjWbkwyK5dIXCaQ7Tyo2Ob79t5O++Txi9Im/Eq4Ep LqT6J/kY6c9SnYMYr7/yKQ/2FSfoeuU= X-Google-Smtp-Source: ABdhPJy0NlFZsDxGG3uSQhrh6lT3zfuZnDqldMs9IzpBEwGV5Iop4UOrgNtU83wuKwZYvM8+15HaLQ== X-Received: by 2002:a17:903:11d1:b0:151:9fb2:9858 with SMTP id q17-20020a17090311d100b001519fb29858mr17235691plh.136.1652707021633; Mon, 16 May 2022 06:17:01 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id n12-20020a17090aab8c00b001df4c27e5a5sm2359592pjq.35.2022.05.16.06.17.00 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:01 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. Peter Anvin" , Kuppuswamy Sathyanarayanan , Joerg Roedel , "Kirill A. Shutemov" , Miguel Ojeda , Kees Cook , Nathan Chancellor , Andrew Morton , Alexei Starovoitov , Marco Elver , Hao Luo , Nick Desaulniers , Rasmus Villemoes Subject: [PATCH V2 1/7] x86/entry: Introduce __entry_text for entry code written in C Date: Mon, 16 May 2022 21:17:32 +0800 Message-Id: <20220516131739.521817-2-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Some entry code will be implemented in C files. Introduce __entry_text to set them in .entry.text section. 
The new __entry_text disables instrumentation like noinstr, so __noinstr_section() is added for noinstr and the new __entry_text. Note, entry code can not access to %gs before the %gs base is switched to kernel %gs base, so stack protector can not be used on the C entry code. But __entry_text doesn't disable stack protector since some compilers might not support function level granular attribute to disable stack protector. It will be disabled in C file level. Cc: Borislav Petkov Reviewed-by: Miguel Ojeda Reviewed-by: Kees Cook Suggested-by: Nick Desaulniers Suggested-by: Peter Zijlstra Signed-off-by: Lai Jiangshan --- arch/x86/include/asm/idtentry.h | 3 +++ include/linux/compiler_types.h | 8 +++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentr= y.h index 72184b0b2219..acc4c99f801c 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -13,6 +13,9 @@ =20 #include =20 +/* Entry code written in C. 
*/ +#define __entry_text __noinstr_section(".entry.text") + /** * DECLARE_IDTENTRY - Declare functions for simple IDT entry points * No error code pushed by hardware diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 1c2c33ae1b37..1d6580ccb081 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -225,9 +225,11 @@ struct ftrace_likely_data { #endif =20 /* Section for code which can't be instrumented at all */ -#define noinstr \ - noinline notrace __attribute((__section__(".noinstr.text"))) \ - __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage +#define __noinstr_section(section) \ + noinline notrace __section(section) __no_kcsan \ + __no_sanitize_address __no_profile __no_sanitize_coverage + +#define noinstr __noinstr_section(".noinstr.text") =20 #endif /* __KERNEL__ */ =20 --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 74939C433FE for ; Mon, 16 May 2022 13:17:21 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243798AbiEPNRS (ORCPT ); Mon, 16 May 2022 09:17:18 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37154 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243733AbiEPNRI (ORCPT ); Mon, 16 May 2022 09:17:08 -0400 Received: from mail-pg1-x52e.google.com (mail-pg1-x52e.google.com [IPv6:2607:f8b0:4864:20::52e]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 5A9C324595 for ; Mon, 16 May 2022 06:17:07 -0700 (PDT) Received: by mail-pg1-x52e.google.com with SMTP id 202so14042090pgc.9 for ; Mon, 16 May 2022 06:17:07 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; 
h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=EGwJvdx17QQnoYP69Pr2C9PuIA0fCg3Px0T/9mdBCv4=; b=Sa/AVjaJl2CVdPszAmhuAcV6Mbr8GE/PU96pQtUg/45ECD7xZOOycZUec+JP53+ZSK BiLVmf9tWXJUKBCn6Y4nfbsLs6zqCclJaeurHB3WwZAqQ6CP0ut+8CW5iZlfg18H8CNl 0zndtNZTvH33J1UGPxBUoupht6P5COVr5yqjVa+9ySiAJ3lRhgwpR6I1GC9AHRV8qsH3 PmI42q7EWuEud2ryZmi424LzgHEPuHn5lM0EdAjsqgnc+kV6LURWvpPRfNRPBfhsRZOL o3hc1nOS7QpB4vjBU+QCLNtE+v0AmGoGmI0eioBkhne9tDrhkrpPpZO6Pl5z8FU2Lv2d YsrQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=EGwJvdx17QQnoYP69Pr2C9PuIA0fCg3Px0T/9mdBCv4=; b=0AP2vnt9Gi0hIaf3aPa/5e9sQDe/sCe+L873DaMHu+R0qzyLhXDmJy3e8gccof/p3r DgVXQ+qF4K4SR4OhlbnmpUaeQNDJy9+NkLHu4zN8HYV1ndFbXBLk+jub3oKoZX5MnDUW hNBnOIZMrtHbyWBQ3qLU5qDrNFqt0mMZ6b9ND+avm4iGciqEHFgd1jIVcggaCkmhhQRR GF4vDNaq559JXffH+EuJnXGVigyQqU6Q0rxyM8Bzm8iQXmlLKY+cCaGBxI61nIbusmho 02ClCso7wxkVSbGtoQcB4A6QXlJYtZ1W1iy7FgYBTwPqRD675zRTqryQeJaK+F/DReSL CTKA== X-Gm-Message-State: AOAM530tsuFEkvoIYYuv17sSePsuPfma7KI0Ujm7CqxFbtHciDgE4j// 6UH7RN7fxXiqvHbiFEOR+v3tsXxqljk= X-Google-Smtp-Source: ABdhPJySgi8QBRUrwpM/EHUre4li9fld8uaNY0ZYrkfOf9flWdptIN6jPc1mzar4cffxt9AYpzHR8g== X-Received: by 2002:a63:f959:0:b0:3da:ed42:5f7c with SMTP id q25-20020a63f959000000b003daed425f7cmr15331306pgk.361.1652707026802; Mon, 16 May 2022 06:17:06 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id cu8-20020a056a00448800b0050dc76281b3sm6935816pfb.141.2022.05.16.06.17.06 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:06 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. 
Peter Anvin" Subject: [PATCH V2 2/7] x86/entry: Move PTI_USER_* to arch/x86/include/asm/processor-flags.h Date: Mon, 16 May 2022 21:17:33 +0800 Message-Id: <20220516131739.521817-3-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan These constants will be also used in C file. Move them to arch/x86/include/asm/processor-flags.h which already has a kin X86_CR3_PTI_PCID_USER_BIT defined. Signed-off-by: Lai Jiangshan --- arch/x86/entry/calling.h | 10 ---------- arch/x86/include/asm/processor-flags.h | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index a97cc78ecb92..f0f60810aee7 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -142,16 +142,6 @@ For 32-bit we have the following conventions - kernel = is built with =20 #ifdef CONFIG_PAGE_TABLE_ISOLATION =20 -/* - * PAGE_TABLE_ISOLATION PGDs are 8k. 
Flip bit 12 to switch between the two - * halves: - */ -#define PTI_USER_PGTABLE_BIT PAGE_SHIFT -#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) -#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT -#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) -#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGT= ABLE_MASK) - .macro SET_NOFLUSH_BIT reg:req bts $X86_CR3_PCID_NOFLUSH_BIT, \reg .endm diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/= processor-flags.h index 02c2cbda4a74..4dd2fbbc861a 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -4,6 +4,7 @@ =20 #include #include +#include =20 #ifdef CONFIG_VM86 #define X86_VM_MASK X86_EFLAGS_VM @@ -50,7 +51,21 @@ #endif =20 #ifdef CONFIG_PAGE_TABLE_ISOLATION + # define X86_CR3_PTI_PCID_USER_BIT 11 + +#ifdef CONFIG_X86_64 +/* + * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two + * halves: + */ +#define PTI_USER_PGTABLE_BIT PAGE_SHIFT +#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) +#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT +#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) +#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGT= ABLE_MASK) +#endif + #endif =20 #endif /* _ASM_X86_PROCESSOR_FLAGS_H */ --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DBAABC433EF for ; Mon, 16 May 2022 13:17:25 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243809AbiEPNRY (ORCPT ); Mon, 16 May 2022 09:17:24 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37494 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243216AbiEPNRN (ORCPT ); Mon, 16 
May 2022 09:17:13 -0400 Received: from mail-pl1-x631.google.com (mail-pl1-x631.google.com [IPv6:2607:f8b0:4864:20::631]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0C69124952 for ; Mon, 16 May 2022 06:17:12 -0700 (PDT) Received: by mail-pl1-x631.google.com with SMTP id i1so14397883plg.7 for ; Mon, 16 May 2022 06:17:12 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=QPVmCWUy/pAelwn42xkgA5+/zRNQTu28RGMt2EDlrN0=; b=dKY6h7ZsI4pVBvrxHfHPPaegi8MMoVaKM/UCpPdoFM0qDfYhGhk65ILAr+auBUalZb GlJldS7I3QfheX5OIh2GLJeqOPYxqbPFBx81u7JKM+xZdP4RMsjg2h499XVBWcjcjnQc +QyxsEkiuSmI8tYNl+BCee4SqPVCykJIIhb4emm+51h2e8epMVFBtVtNBOgw1Hiqjbms rLthOTvrDD1Ea9yyGlPhtBhAF4W8GoNLn6sWLcGgoOj8WLLiGLoFYxe1CO7LY8SEeB9U AoXoIkCyTahZ9yHXeBCofXMFMHyrede9xAB8DluvW5UfTzvUvKei8dqYPE+I2mozztoY SncA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=QPVmCWUy/pAelwn42xkgA5+/zRNQTu28RGMt2EDlrN0=; b=VioaLkZkK0n+aZ+HObQbsAfyLAWylSQXRgzab3iYQFyNd2XXNvMHl0WANEyBQDspMd 3Ze961Etap/XXProqsXCWCKenF+LSVPvNKwKtWXjKYTY93PhnQjzb1Vs0iQftxm6nz8R x5q/jz6sFGvf9CKjNUFVx2Lf82C/jxp5O1WRr8c4Frilm8Kcu6+swis6EFDtdSnzLglp tcns/S70uL07voYDfonFl+EIn2z+SW0SqjeZID9/M8YU3IeTtSehKCSPYIS5Rd24OvNk 3qwBlCeQFme1y44vhylpZ41Wx1aw598pCrMCikbAe3dEVewdZHFxZK27iCUeu3u/M7Aq 1bfA== X-Gm-Message-State: AOAM5319awLF6WTwveFtCgDZj3BpGlygRl275uLRnLDCGhLLrqJbQHQa FkBNoDAiisFyTTgopf6lQF09Rzn4AT8= X-Google-Smtp-Source: ABdhPJx9nQG9bcFjsTqtttmgbi21slFg7XmMCrJ5I0wRYdLrI+WVXp+I9pBDjk1TxkuGFMIhKARkwQ== X-Received: by 2002:a17:902:e808:b0:161:946c:d2a5 with SMTP id u8-20020a170902e80800b00161946cd2a5mr1782372plg.93.1652707031495; Mon, 16 May 2022 06:17:11 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id 
e18-20020a170902ed9200b0015eab1ad02esm6931710plj.215.2022.05.16.06.17.10 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:11 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. Peter Anvin" , Kees Cook , Brian Gerst Subject: [PATCH V2 3/7] x86: Mark __native_read_cr3() & native_write_cr3() as __always_inline Date: Mon, 16 May 2022 21:17:34 +0800 Message-Id: <20220516131739.521817-4-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Mark __native_read_cr3() & native_write_cr3() as __always_inline to ensure they are not instrumentable and in the .entry.text section if the caller is not instrumentable and in the .entry.text section. It prepares for __native_read_cr3() and native_write_cr3() to be used in the C entry code for handling KPTI. 
Signed-off-by: Lai Jiangshan --- arch/x86/include/asm/special_insns.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/sp= ecial_insns.h index 45b18eb94fa1..dbaee50abb3c 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -42,14 +42,14 @@ static __always_inline void native_write_cr2(unsigned l= ong val) asm volatile("mov %0,%%cr2": : "r" (val) : "memory"); } =20 -static inline unsigned long __native_read_cr3(void) +static __always_inline unsigned long __native_read_cr3(void) { unsigned long val; asm volatile("mov %%cr3,%0\n\t" : "=3Dr" (val) : __FORCE_ORDER); return val; } =20 -static inline void native_write_cr3(unsigned long val) +static __always_inline void native_write_cr3(unsigned long val) { asm volatile("mov %0,%%cr3": : "r" (val) : "memory"); } --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 0A558C433EF for ; Mon, 16 May 2022 13:17:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243833AbiEPNR3 (ORCPT ); Mon, 16 May 2022 09:17:29 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37998 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243803AbiEPNRS (ORCPT ); Mon, 16 May 2022 09:17:18 -0400 Received: from mail-pl1-x632.google.com (mail-pl1-x632.google.com [IPv6:2607:f8b0:4864:20::632]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id DF97E24F09 for ; Mon, 16 May 2022 06:17:16 -0700 (PDT) Received: by mail-pl1-x632.google.com with SMTP id q7so2992763plx.3 for ; Mon, 16 May 2022 06:17:16 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; 
h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=4unt6PUqjlwmajoQtsGt030sfPAmI7QPMB4lAchbRI4=; b=AtmXzsFGqnR9c5Cs0M9H4ANTDWzVo0obhDx0Byxa/U6YpWizVKa64Iyqt2EGiClRA+ VLHNoXr2zYdMbeG+dU1n75jWCTjmDEby6oF15oiNKVSyOQsQBIOv9179Sd57GBD/6gx+ WOwix/VT5awLCufz/3lzPSWxGDK2/V4UGQm2BWdHtl2oIgQcWkQqZV6l0/E0pLwD9YqN 6zaQpTIobz5M4XsfAjAmZpHRmISGMjvdtx3pp3y0DoujWLw+9RTkuVDr3bHhIFguxwoe Y9LSykPz410ggoe8WN3imLkqwY0tzyfm35UqPNRzMYj1m50tEo1e42pgRM/A4mFKBG/c tdPw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=4unt6PUqjlwmajoQtsGt030sfPAmI7QPMB4lAchbRI4=; b=7zzBQjJEeqruWRetr2F8FiJWRFMDweEcdkisvkWrQ/t+DN3KEWSsMfMPoej7kMT77h 6u5zpwO+FmaSM+l5kX3f1RqRoZpG0TUoVDHLA4ijUm4kULuYPFj0rf3ooPtgyaSqPnGl dlumtgKWem4zbBp2b6e7NTq6c/8iGYCRFWZ9CMh6puumICdFqVw+RBl0wl+R/DGxR7GQ vUeGfgUvskzsVvg3JRZxEC56KxNvkxYZjmgbT7rLUmVUa/dmnoIfQlCJtt5fsuwlHyvc T/KcaqEi1+AU7FQS6iJvhEia+3tsTurVH5FFBGl/vfYCQ7GtqDXHFL79XRwst0T3PB6B 5bfA== X-Gm-Message-State: AOAM530mDLdKTduazN1hI6ChCvtduk3RKe8zkOCESozPgEFICv9pDSRn jXCgKTItrEpTbclUBC+ZP+GOZHrQ9oY= X-Google-Smtp-Source: ABdhPJzXUfHg9ffsepRbRBo3K6rwEtxQYoA51K6uUH6LeR9NHvspfUO4WDKkZ4vtn71tgPToZp3Lug== X-Received: by 2002:a17:902:e80d:b0:15e:b27b:931c with SMTP id u13-20020a170902e80d00b0015eb27b931cmr17637053plg.5.1652707036218; Mon, 16 May 2022 06:17:16 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id a12-20020a170902900c00b0015e8d4eb1ddsm6990682plp.39.2022.05.16.06.17.15 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:16 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. 
Peter Anvin" Subject: [PATCH V2 4/7] x86/entry: Add arch/x86/entry/entry64.c for C entry code Date: Mon, 16 May 2022 21:17:35 +0800 Message-Id: <20220516131739.521817-5-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Add a C file "entry64.c" to deposit C entry code for traps and faults which will be as the same logic as the existing ASM code in entry_64.S. The file is as low level as entry_64.S and its code can be running in the environments that the GS base is a user controlled value, or the CR3 is the KPTI user CR3 or both. All the code in this file should not be instrumentable. Many instrument facilities can be disabled by per-function attributes which are included in the macro __noinstr_section. But stack-protector can not be disabled function-granularly by some compliers. So stack-protector is disabled for the whole file in Makefile. 
Suggested-by: Joerg Roedel Signed-off-by: Lai Jiangshan --- arch/x86/entry/Makefile | 3 ++- arch/x86/entry/entry64.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 arch/x86/entry/entry64.c diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 7fec5dcf6438..792f7009ff32 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -10,13 +10,14 @@ KCOV_INSTRUMENT :=3D n CFLAGS_REMOVE_common.o =3D $(CC_FLAGS_FTRACE) =20 CFLAGS_common.o +=3D -fno-stack-protector +CFLAGS_entry64.o +=3D -fno-stack-protector =20 obj-y :=3D entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o obj-y +=3D common.o +obj-$(CONFIG_X86_64) +=3D entry64.o =20 obj-y +=3D vdso/ obj-y +=3D vsyscall/ =20 obj-$(CONFIG_IA32_EMULATION) +=3D entry_64_compat.o syscall_32.o obj-$(CONFIG_X86_X32_ABI) +=3D syscall_x32.o - diff --git a/arch/x86/entry/entry64.c b/arch/x86/entry/entry64.c new file mode 100644 index 000000000000..ace73861c2a0 --- /dev/null +++ b/arch/x86/entry/entry64.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs + * Copyright (C) 2000 Pavel Machek + * Copyright (C) 2022 Lai Jiangshan, Ant Group + * + * Handle entries and exits for hardware traps and faults. + * + * It is as low level as entry_64.S and its code can be running in the + * environments that the GS base is a user controlled value, or the CR3 + * is the PTI user CR3 or both. 
+ */ +#include --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B59DAC433F5 for ; Mon, 16 May 2022 13:17:42 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243778AbiEPNRk (ORCPT ); Mon, 16 May 2022 09:17:40 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:38160 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243796AbiEPNRV (ORCPT ); Mon, 16 May 2022 09:17:21 -0400 Received: from mail-pj1-x1036.google.com (mail-pj1-x1036.google.com [IPv6:2607:f8b0:4864:20::1036]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A01A52529B for ; Mon, 16 May 2022 06:17:20 -0700 (PDT) Received: by mail-pj1-x1036.google.com with SMTP id t11-20020a17090a6a0b00b001df6f318a8bso1253845pjj.4 for ; Mon, 16 May 2022 06:17:20 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=FSeMlanFh2svXxaNRkwpD5s2Jt4O19S77leAuNHNOlo=; b=f5bpF3JgDxt953UNKD7px9h21rVHMzamUxSzdhuN+W5C3vs/DafoFQLV2eCR4gml3C SY0T1kPjdmclW0ZLySYYdws0f4oCIs9wH3olszqyO7jfj5rt/XGVkV4jpg/uQK3Tk8SO Y+quy7MLrfa7JseKtmQ0KArxfT2wgHF+yObcsJ2DetEcGj8nyi7IIXnTr1xm3KZbQrvF TdLQ0FQnnSx/5tHQx7sMGyVJmUZt6wVITKvawFvVjC2iL0dIcQiCzcxewzMMIY2sPjV2 4HX/N8+qoi/Pnx/Z4tthJDMNXQeNNNHyEJfQ3fA5lIJEAcvzsI1bt18UHNDXsYf1uwlt 3B3g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=FSeMlanFh2svXxaNRkwpD5s2Jt4O19S77leAuNHNOlo=; b=Wjem9v6jcn/ZjKylkym0mN2SRjQlllM07che1zwLPNCnD0VKKzxn/1z/mne+4SKI3H 
78LioS+86RE00SjCQoRq4ZHc93nZHMVI+Ve16+Pqe6GcoLkGRbLl9Dv5GckpsAJONsdS hdIhrL3gLtAYKrX3iN8j4KtRSM90jcjjKNKfafv9FkIdc1y99ietuYq9k1xJ0czMj7A3 31LPc4rmF6BopVebfxGPE1oSBjbTaRY6AUrRzltT9qQhJMspfPTunmtPi58PSMwaK10w 4Y0uMi/dR4u4FPS5TRhogyyoSRB1FTHEXoKYB9fjICDS1vbZ5TMY3spvel2sqlngQGVv IRSQ== X-Gm-Message-State: AOAM531ik0P9s9/Ic/VT+0/0biby1VOh2+8PKv6KMq+1kXbG3Si9DpQM +BPqA5ZlyeDEDM2zgQMMnOPr8KdsBvc= X-Google-Smtp-Source: ABdhPJwOiUymlP+QbQi0YPbdKtc/CtinLbNUGq0oCm+dASSQuAb/kkZiUa6GSMl7RHPrLcE8faIhEQ== X-Received: by 2002:a17:902:9a4c:b0:156:6735:b438 with SMTP id x12-20020a1709029a4c00b001566735b438mr17862669plv.46.1652707039909; Mon, 16 May 2022 06:17:19 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id h17-20020a62b411000000b0050df474e4d2sm6807753pfn.218.2022.05.16.06.17.19 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:19 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. Peter Anvin" Subject: [PATCH V2 5/7] x86/entry: Add the C verion of SWITCH_TO_KERNEL_CR3 as switch_to_kernel_cr3() Date: Mon, 16 May 2022 21:17:36 +0800 Message-Id: <20220516131739.521817-6-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Add the C version switch_to_kernel_cr3() which implements the macro SWITCH_TO_KERNEL_CR3() in arch/x86/entry/calling.h. No functional difference intended. 
Note: The compiler generates "AND $0xe7,%ah" (3 bytes) for the code "cr3 =3D user_cr3 & ~PTI_USER_PGTABLE_AND_PCID_MASK" while the ASM code in SWITCH_TO_KERNEL_CR3() results "AND $0xffffffffffffe7ff,%rax" (6 bytes). The compiler generates lengthier code for "cr3 |=3D X86_CR3_PCID_NOFLUSH" because it uses "MOVABS+OR" (13 bytes) rather than a single "BTS" (5 bytes). Signed-off-by: Lai Jiangshan --- arch/x86/entry/entry64.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/x86/entry/entry64.c b/arch/x86/entry/entry64.c index ace73861c2a0..bd77cc8373ce 100644 --- a/arch/x86/entry/entry64.c +++ b/arch/x86/entry/entry64.c @@ -12,3 +12,27 @@ * is the PTI user CR3 or both. */ #include + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +static __always_inline void pti_switch_to_kernel_cr3(unsigned long user_cr= 3) +{ + /* + * Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 + * at kernel pagetables: + */ + unsigned long cr3 =3D user_cr3 & ~PTI_USER_PGTABLE_AND_PCID_MASK; + + if (static_cpu_has(X86_FEATURE_PCID)) + cr3 |=3D X86_CR3_PCID_NOFLUSH; + + native_write_cr3(cr3); +} + +static __always_inline void switch_to_kernel_cr3(void) +{ + if (static_cpu_has(X86_FEATURE_PTI)) + pti_switch_to_kernel_cr3(__native_read_cr3()); +} +#else +static __always_inline void switch_to_kernel_cr3(void) {} +#endif --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4139EC433EF for ; Mon, 16 May 2022 13:17:48 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243914AbiEPNRp (ORCPT ); Mon, 16 May 2022 09:17:45 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:38962 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243837AbiEPNRc (ORCPT ); Mon, 16 
May 2022 09:17:32 -0400 Received: from mail-pj1-x1035.google.com (mail-pj1-x1035.google.com [IPv6:2607:f8b0:4864:20::1035]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EBD6B25C4E for ; Mon, 16 May 2022 06:17:24 -0700 (PDT) Received: by mail-pj1-x1035.google.com with SMTP id a23-20020a17090acb9700b001df4e9f4870so2809620pju.1 for ; Mon, 16 May 2022 06:17:24 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=gkGEhy3VfxaC6VD1+0Znm8PQRe5Rv1gv7lzNV+9U6sU=; b=of3+wHvtm7eRVXWa+Ssr07ZyY84NwEb7ZN+SI2YphvMQkh5Q6DMlZeg/uxDg47EY83 x428ZDTLZKQ/JOyZzXK2F/0z/CEJaLRKEXu1z4Kp0R3ibMrbEqSu4K7/xvGc4qw9dmHg 3vJWXTpA5NVtaDb68jsFDUbzEDZOR0MbLGTJNH/7IrnJ9j0e8BmVs1gnoQfH69KHY7cV r0voCpvUFYk565pzY6cjTN6cHjci+46jXO//xZiKCdr3lxCIfSSbAACDbgUqfkWhlh3L C4QqZDRPllBJp0JiDiS9HAx3zSplCO5csXcIPlw+XufRp0B20aMsYcYcuL8LCe7eGGFn 7Ozw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=gkGEhy3VfxaC6VD1+0Znm8PQRe5Rv1gv7lzNV+9U6sU=; b=TfplgPhccZ8SjMbC6+Hsjyv/+b5s138UnpuJiW6t63BcxGlUvfbL+VacxKmu0t6/ep KwvVd+0N52M2QkxBlOgthtwABJG3S80ALQVbyD2jUztnJd25w5Kh6WvYg3ln+8Eg4MW1 ETRyApiPzAnrbiUrxUqE0EtVBNP5UlK2t+42pB021yPU+B3fKn4KmDld3bNslhhWJTxd 95n9elEtTlJHA5coMcmn114wAidvKTFVxbItmx21WwmT4sAK1M2ltBKqzXpH1HdlR7gf TuiMgJOp/xxaQVoVWiiayiCD98PZh1fmEP3FV4ZcmyV64o6pL3T29bTbvzUvGmCILOXO T6Qg== X-Gm-Message-State: AOAM531YklkF9KhymQrcFE2ljCv6s2coTbo0ou3mOw3GT+TFiDuDpwgy IcP64bhF+J/IvUqOH+ea1dcfrfBnGoY= X-Google-Smtp-Source: ABdhPJzIwhoAisiSVova9GP69StXH0dNk+cuV4VhXpQPNnlVcWeJoIRuRDpe9n+thmw2voMmSV5w0g== X-Received: by 2002:a17:90b:1752:b0:1df:2f90:87ee with SMTP id jf18-20020a17090b175200b001df2f9087eemr9664856pjb.94.1652707043596; Mon, 16 May 2022 06:17:23 -0700 (PDT) Received: from localhost ([47.251.4.198]) by 
smtp.gmail.com with ESMTPSA id y124-20020a62ce82000000b0050dc762812bsm6859188pfg.5.2022.05.16.06.17.22 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:23 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. Peter Anvin" Subject: [PATCH V2 6/7] x86/traps: Add fence_swapgs_{user,kernel}_entry() and user_entry_swapgs_and_fence() Date: Mon, 16 May 2022 21:17:37 +0800 Message-Id: <20220516131739.521817-7-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Add the C version fence_swapgs_{user,kernel}_entry() in entry64.c which are the same as the ASM macro FENCE_SWAPGS_{USER,KERNEL}_ENTRY. fence_swapgs_user_entry is used in the user entry swapgs code path, to prevent a speculative swapgs when coming from kernel space. fence_swapgs_kernel_entry is used in the kernel entry code path, to prevent the swapgs from getting speculatively skipped when coming from user space. Add the C user_entry_swapgs_and_fence() which implements the ASM code: swapgs FENCE_SWAPGS_USER_ENTRY It will be used in the user entry swapgs code path, doing the swapgs and lfence to prevent a speculative swapgs when coming from kernel space. 
Cc: Josh Poimboeuf Suggested-by: Peter Zijlstra Signed-off-by: Lai Jiangshan --- arch/x86/entry/entry64.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/arch/x86/entry/entry64.c b/arch/x86/entry/entry64.c index bd77cc8373ce..f7f23800cee4 100644 --- a/arch/x86/entry/entry64.c +++ b/arch/x86/entry/entry64.c @@ -36,3 +36,33 @@ static __always_inline void switch_to_kernel_cr3(void) #else static __always_inline void switch_to_kernel_cr3(void) {} #endif + +/* + * Mitigate Spectre v1 for conditional swapgs code paths. + * + * fence_swapgs_user_entry is used in the user entry swapgs code path, to + * prevent a speculative swapgs when coming from kernel space. It must be + * used with switch_to_kernel_cr3() in the same path. + * + * fence_swapgs_kernel_entry is used in the kernel entry code path without + * CR3 write or with conditinal CR3 write only, to prevent the swapgs from + * getting speculatively skipped when coming from user space. + * + * user_entry_swapgs_and_fence is a wrapper of swapgs and fence for user e= ntry + * code path. 
+ */ +static __always_inline void fence_swapgs_user_entry(void) +{ + alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_USER); +} + +static __always_inline void fence_swapgs_kernel_entry(void) +{ + alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL); +} + +static __always_inline void user_entry_swapgs_and_fence(void) +{ + native_swapgs(); + fence_swapgs_user_entry(); +} --=20 2.19.1.6.gb485710b From nobody Fri May 8 00:13:38 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 399A8C433EF for ; Mon, 16 May 2022 13:17:55 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243892AbiEPNRw (ORCPT ); Mon, 16 May 2022 09:17:52 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:39100 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S243901AbiEPNRm (ORCPT ); Mon, 16 May 2022 09:17:42 -0400 Received: from mail-pl1-x631.google.com (mail-pl1-x631.google.com [IPv6:2607:f8b0:4864:20::631]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 253983A5FC for ; Mon, 16 May 2022 06:17:31 -0700 (PDT) Received: by mail-pl1-x631.google.com with SMTP id m12so14412272plb.4 for ; Mon, 16 May 2022 06:17:31 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=yNLH3g4oey3zSyxe+H2t8uP9EGdvCmwPMhX5Etl/GJY=; b=bgkcSbE/jBQG8HN5gGcom2Ik6rP6OVJC4RgHkF7II5JVJiPATV/HQmgqNfKHLF4Dkx QJn+cchJ205TcH/0sbKhn5diZ7r9Qbzw7JwurPgcIGj1Oc4sFdJmScHL9xvimbVscmyh VqEIg97hB9V0JlfNCyw7kfa5cSRFAM64yibQvhdFCaSEg/buIgf3mKqf/IEFoI64ygO6 zW8YuN0/Rk/Kva310hsf0HOkYqfs8c2G1yIkAIuNqRal2AeCW9C11ZXNUpp5WplhlrB2 LXzARbhUnbEpx8+euVtpVFAd8rp2Qz357/cvuBjkqGz3ZfQQcxh6h6CtjNhkxM8KEht5 JEUQ== X-Google-DKIM-Signature: 
v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=yNLH3g4oey3zSyxe+H2t8uP9EGdvCmwPMhX5Etl/GJY=; b=AbCefHk5BEwvfqpwG+VJD83JsVegNY5TClsv8B73bLm8L3/D+/cJFnHFvlms3VG422 4heOK0A6QeFXVD3Eh/Fn+7gOamEioX0cno4A8pFRY2Es4MWOAz/A5OcAdR1osjLukF2T DoETIMibMXhUM1ySrDrPwvCvuj8zWMbLPaAneEQEpuBg0NpEGqB6iVE5LRJfc6Wtft+6 KHz4jscZBkW25FBkYjm0biReTjwPuEiE8eKVj0egkthMjNTzshT5Uz5ni+8wLbozTEtp DDTg8DpHzbOC66Iknrnd+ussPMQGN/ldJZyeHHGLGho8J+wFZnAHM4bkAfy48Cjhdeom 3I4w== X-Gm-Message-State: AOAM533XD8IsUDcbRY4aOQ5iS8CIxaaVvkH7GolIbg3yaB3wgIFPFQ5v 6aUJNBHcvTL09vyGdULjaudWU2U48GM= X-Google-Smtp-Source: ABdhPJwd4yznxcfDwX53COSduv50H7QUd0+1h/DL78zPEmBWOIdRQ8vYCvjr7Z+rAZDtkvRpUHhkRg== X-Received: by 2002:a17:903:2042:b0:161:53e9:c7b7 with SMTP id q2-20020a170903204200b0016153e9c7b7mr10510701pla.122.1652707050974; Mon, 16 May 2022 06:17:30 -0700 (PDT) Received: from localhost ([47.251.4.198]) by smtp.gmail.com with ESMTPSA id s17-20020a17090302d100b0015e8d4eb243sm7006029plk.141.2022.05.16.06.17.30 (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 16 May 2022 06:17:30 -0700 (PDT) From: Lai Jiangshan To: linux-kernel@vger.kernel.org Cc: Borislav Petkov , Peter Zijlstra , Josh Poimboeuf , Andy Lutomirski , Thomas Gleixner , x86@kernel.org, Lai Jiangshan , Ingo Molnar , Dave Hansen , "H. Peter Anvin" , "Chang S. Bae" , Kees Cook , "Kirill A. 
Shutemov" , Fenghua Yu Subject: [PATCH V2 7/7] x86/entry: Implement the whole error_entry() as C code Date: Mon, 16 May 2022 21:17:38 +0800 Message-Id: <20220516131739.521817-8-jiangshanlai@gmail.com> X-Mailer: git-send-email 2.19.1.6.gb485710b In-Reply-To: <20220516131739.521817-1-jiangshanlai@gmail.com> References: <20220516131739.521817-1-jiangshanlai@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" From: Lai Jiangshan Implement error_entry() as C code in arch/x86/entry/entry64.c and replace the ASM version of error_entry(). The code might be in the user CR3 and user GS base at the start of the function so it calls __always_inline C function only until the GS and CR3 are switched. No functional change intended and comments are also copied. The C version generally has better readability and is easier to update/improve. Note: To avoid using goto, the C code has two call sites of sync_regs(). It calls sync_regs() directly after fixup_bad_iret() returns while the ASM code uses JMP instruction to jump to the start of the first call site. The compiler uses tail-call-optimization for calling sync_regs(). It uses "JMP sync_regs" while the ASM code uses "CALL+RET". 
Signed-off-by: Lai Jiangshan --- arch/x86/entry/entry64.c | 69 +++++++++++++++++++++++++++++ arch/x86/entry/entry_64.S | 85 ++---------------------------------- arch/x86/include/asm/proto.h | 1 + arch/x86/include/asm/traps.h | 1 + arch/x86/kernel/traps.c | 2 - 5 files changed, 74 insertions(+), 84 deletions(-) diff --git a/arch/x86/entry/entry64.c b/arch/x86/entry/entry64.c index f7f23800cee4..bd047c329622 100644 --- a/arch/x86/entry/entry64.c +++ b/arch/x86/entry/entry64.c @@ -13,6 +13,8 @@ */ #include =20 +extern unsigned char asm_load_gs_index_gs_change[]; + #ifdef CONFIG_PAGE_TABLE_ISOLATION static __always_inline void pti_switch_to_kernel_cr3(unsigned long user_cr= 3) { @@ -66,3 +68,70 @@ static __always_inline void user_entry_swapgs_and_fence(= void) native_swapgs(); fence_swapgs_user_entry(); } + +/* + * Put pt_regs onto the task stack and switch GS and CR3 if needed. + * The actual stack switch is done in entry_64.S. + * + * Be careful, it might be in the user CR3 and user GS base at the start + * of the function. + */ +asmlinkage __visible __entry_text +struct pt_regs *error_entry(struct pt_regs *eregs) +{ + unsigned long iret_ip =3D (unsigned long)native_irq_return_iret; + + if (user_mode(eregs)) { + /* + * We entered from user mode. + * Switch to kernel gsbase and CR3. + */ + user_entry_swapgs_and_fence(); + switch_to_kernel_cr3(); + + /* Put pt_regs onto the task stack. */ + return sync_regs(eregs); + } + + /* + * There are two places in the kernel that can potentially fault with + * usergs. Handle them here. B stepping K8s sometimes report a + * truncated RIP for IRET exceptions returning to compat mode. Check + * for these here too. + */ + if ((eregs->ip =3D=3D iret_ip) || (eregs->ip =3D=3D (unsigned int)iret_ip= )) { + eregs->ip =3D iret_ip; /* Fix truncated RIP */ + + /* + * We came from an IRET to user mode, so we have user + * gsbase and CR3. 
Switch to kernel gsbase and CR3: + */ + user_entry_swapgs_and_fence(); + switch_to_kernel_cr3(); + + /* + * Pretend that the exception came from user mode: set up + * pt_regs as if we faulted immediately after IRET and then + * put pt_regs onto the real task stack. + */ + return sync_regs(fixup_bad_iret(eregs)); + } + + /* + * Hack: asm_load_gs_index_gs_change can fail with user gsbase. + * If this happens, fix up gsbase and proceed. We'll fix up the + * exception and land in asm_load_gs_index_gs_change's error + * handler with kernel gsbase. + */ + if (eregs->ip =3D=3D (unsigned long)asm_load_gs_index_gs_change) + native_swapgs(); + + /* + * Issue an LFENCE to prevent GS speculation, regardless of whether + * it is a kernel or user gsbase. + */ + fence_swapgs_kernel_entry(); + + /* Enter from kernel, don't move pt_regs */ + return eregs; +} diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 3a1e3f215617..b678189b029e 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -343,7 +343,7 @@ SYM_CODE_END(push_and_clear_regs) * own pvops for IRET and load_gs_index(). And it doesn't need to * switch the CR3. So it can skip invoking error_entry(). 
*/ - ALTERNATIVE "call error_entry; movq %rax, %rsp", \ + ALTERNATIVE "movq %rsp, %rdi; call error_entry; movq %rax, %rsp", \ "", X86_FEATURE_XENPV =20 ENCODE_FRAME_POINTER @@ -778,7 +778,7 @@ _ASM_NOKPROBE(common_interrupt_return) SYM_FUNC_START(asm_load_gs_index) FRAME_BEGIN swapgs -.Lgs_change: +SYM_INNER_LABEL(asm_load_gs_index_gs_change, SYM_L_GLOBAL) ANNOTATE_NOENDBR // error_entry movl %edi, %gs 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE @@ -799,7 +799,7 @@ SYM_FUNC_START(asm_load_gs_index) movl %eax, %gs jmp 2b =20 - _ASM_EXTABLE(.Lgs_change, .Lbad_gs) + _ASM_EXTABLE(asm_load_gs_index_gs_change, .Lbad_gs) =20 SYM_FUNC_END(asm_load_gs_index) EXPORT_SYMBOL(asm_load_gs_index) @@ -1006,85 +1006,6 @@ SYM_CODE_START_LOCAL(paranoid_exit) jmp restore_regs_and_return_to_kernel SYM_CODE_END(paranoid_exit) =20 -/* - * Switch GS and CR3 if needed. - */ -SYM_CODE_START_LOCAL(error_entry) - UNWIND_HINT_FUNC - testb $3, CS+8(%rsp) - jz .Lerror_kernelspace - - /* - * We entered from user mode or we're pretending to have entered - * from user mode due to an IRET fault. - */ - swapgs - FENCE_SWAPGS_USER_ENTRY - /* We have user CR3. Change to kernel CR3. */ - SWITCH_TO_KERNEL_CR3 scratch_reg=3D%rax - - leaq 8(%rsp), %rdi /* arg0 =3D pt_regs pointer */ -.Lerror_entry_from_usermode_after_swapgs: - /* Put us onto the real thread stack. */ - call sync_regs - RET - - /* - * There are two places in the kernel that can potentially fault with - * usergs. Handle them here. B stepping K8s sometimes report a - * truncated RIP for IRET exceptions returning to compat mode. Check - * for these here too. - */ -.Lerror_kernelspace: - leaq native_irq_return_iret(%rip), %rcx - cmpq %rcx, RIP+8(%rsp) - je .Lerror_bad_iret - movl %ecx, %eax /* zero extend */ - cmpq %rax, RIP+8(%rsp) - je .Lbstep_iret - cmpq $.Lgs_change, RIP+8(%rsp) - jne .Lerror_entry_done_lfence - - /* - * hack: .Lgs_change can fail with user gsbase. If this happens, fix up - * gsbase and proceed. 
We'll fix up the exception and land in - * .Lgs_change's error handler with kernel gsbase. - */ - swapgs - - /* - * Issue an LFENCE to prevent GS speculation, regardless of whether it is= a - * kernel or user gsbase. - */ -.Lerror_entry_done_lfence: - FENCE_SWAPGS_KERNEL_ENTRY - leaq 8(%rsp), %rax /* return pt_regs pointer */ - RET - -.Lbstep_iret: - /* Fix truncated RIP */ - movq %rcx, RIP+8(%rsp) - /* fall through */ - -.Lerror_bad_iret: - /* - * We came from an IRET to user mode, so we have user - * gsbase and CR3. Switch to kernel gsbase and CR3: - */ - swapgs - FENCE_SWAPGS_USER_ENTRY - SWITCH_TO_KERNEL_CR3 scratch_reg=3D%rax - - /* - * Pretend that the exception came from user mode: set up pt_regs - * as if we faulted immediately after IRET. - */ - leaq 8(%rsp), %rdi /* arg0 =3D pt_regs pointer */ - call fixup_bad_iret - mov %rax, %rdi - jmp .Lerror_entry_from_usermode_after_swapgs -SYM_CODE_END(error_entry) - SYM_CODE_START_LOCAL(error_return) UNWIND_HINT_REGS DEBUG_ENTRY_ASSERT_IRQS_OFF diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 0f899c8d7a4e..95d6d3a53cd5 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -13,6 +13,7 @@ void syscall_init(void); #ifdef CONFIG_X86_64 void entry_SYSCALL_64(void); void entry_SYSCALL_64_safe_stack(void); +extern unsigned char native_irq_return_iret[]; long do_arch_prctl_64(struct task_struct *task, int option, unsigned long = arg2); #endif =20 diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 47ecfff2c83d..2d00100d3e03 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -14,6 +14,7 @@ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *ere= gs); asmlinkage __visible notrace struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs); +asmlinkage __visible notrace struct pt_regs *error_entry(struct pt_regs *e= regs); void __init trap_init(void); asmlinkage __visible noinstr struct pt_regs 
*vc_switch_off_ist(struct pt_r= egs *eregs); #endif diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d62b2cb85cea..f76a15f654c5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -436,8 +436,6 @@ DEFINE_IDTENTRY_DF(exc_double_fault) #endif =20 #ifdef CONFIG_X86_ESPFIX64 - extern unsigned char native_irq_return_iret[]; - /* * If IRET takes a non-IST fault on the espfix64 stack, then we * end up promoting it to a doublefault. In that case, take --=20 2.19.1.6.gb485710b