From nobody Sat May 18 20:15:14 2024
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: dave.hansen@intel.com, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de
Cc: decui@microsoft.com, rick.p.edgecombe@intel.com,
    sathyanarayanan.kuppuswamy@linux.intel.com, seanjc@google.com,
    thomas.lendacky@amd.com, x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCHv3 1/3] x86/mm: Allow guest.enc_status_change_prepare() to fail
Date: Tue, 6 Jun 2023 12:56:20 +0300
Message-Id: <20230606095622.1939-2-kirill.shutemov@linux.intel.com>
In-Reply-To: <20230606095622.1939-1-kirill.shutemov@linux.intel.com>

TDX code is going to provide guest.enc_status_change_prepare() that is
able to fail. TDX will use the call to convert the GPA range from
shared to private. This operation can fail.

Add a way to return an error from the callback.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
---
 arch/x86/include/asm/x86_init.h | 2 +-
 arch/x86/kernel/x86_init.c      | 2 +-
 arch/x86/mm/mem_encrypt_amd.c   | 4 +++-
 arch/x86/mm/pat/set_memory.c    | 3 ++-
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 88085f369ff6..1ca9701917c5 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -150,7 +150,7 @@ struct x86_init_acpi {
  * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
  */
 struct x86_guest {
-	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
 	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
 	bool (*enc_tlb_flush_required)(bool enc);
 	bool (*enc_cache_flush_required)(void);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index d82f4fa2f1bf..f230d4d7d8eb 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -130,7 +130,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
 
 static void default_nmi_init(void) { };
 
-static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
+static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
 static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index e0b51c09109f..4f95c449a406 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -319,7 +319,7 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 #endif
 }
 
-static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
 	/*
 	 * To maintain the security guarantees of SEV-SNP guests, make sure
@@ -327,6 +327,8 @@ static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
 	 */
 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
 		snp_set_memory_shared(vaddr, npages);
+
+	return true;
 }
 
 /* Return true unconditionally: return value doesn't matter for the SEV side */
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 7159cf787613..b8f48ebe753c 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2151,7 +2151,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
 
 	/* Notify hypervisor that we are about to set/clr encryption attribute. */
-	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
+		return -EIO;
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
-- 
2.39.3
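[With this change in place, a false return from the prepare hook aborts
the conversion with -EIO before __change_page_attr_set_clr() modifies
any page tables, so a failed shared->private conversion never leaves a
stale private mapping behind. A minimal sketch of the resulting
contract; my_guest_prepare() and my_convert_to_private() below are
hypothetical stand-ins for a platform backend, not part of this series:

	/* Hypothetical backend illustrating the fallible-prepare contract. */
	static bool my_guest_prepare(unsigned long vaddr, int npages, bool enc)
	{
		/* Shared->private needs hypervisor work that can fail. */
		if (enc && my_convert_to_private(vaddr, npages))
			return false;	/* core code turns this into -EIO */
		return true;
	}

	static void __init my_guest_init(void)
	{
		x86_platform.guest.enc_status_change_prepare = my_guest_prepare;
	}

Callers of set_memory_encrypted()/set_memory_decrypted() then see the
failure as an ordinary error code rather than a silently ignored one.]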
From nobody Sat May 18 20:15:14 2024
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: dave.hansen@intel.com, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de
Cc: decui@microsoft.com, rick.p.edgecombe@intel.com,
    sathyanarayanan.kuppuswamy@linux.intel.com, seanjc@google.com,
    thomas.lendacky@amd.com, x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCHv3 2/3] x86/tdx: Fix race between set_memory_encrypted() and load_unaligned_zeropad()
Date: Tue, 6 Jun 2023 12:56:21 +0300
Message-Id: <20230606095622.1939-3-kirill.shutemov@linux.intel.com>
In-Reply-To: <20230606095622.1939-1-kirill.shutemov@linux.intel.com>

Touching a privately mapped GPA that is not properly converted to
private with MapGPA and accepted leads to an unrecoverable exit to the
VMM.

load_unaligned_zeropad() can touch memory that is not owned by the
caller, but just happens to be next after the owned memory. This
load_unaligned_zeropad() behavior makes it important when the kernel
asks the VMM to convert a GPA from shared to private or back. The
kernel must never have a page mapped into the direct mapping (and
aliases) as private when the GPA is already converted to shared or
when the GPA is not yet converted to private.

guest.enc_status_change_prepare() is called before adjusting the direct
mapping and is therefore responsible for converting the memory to
private.

guest.enc_status_change_finish() is called after adjusting the direct
mapping and converts the memory to shared.

It is okay to have a shared mapping of memory that is not properly
converted: handle_mmio() knows how to deal with
load_unaligned_zeropad() stepping on it.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fixes: 7dbde7631629 ("x86/mm/cpa: Add support for TDX shared memory")
Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
---
 arch/x86/coco/tdx/tdx.c | 56 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 53 insertions(+), 3 deletions(-)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index e146b599260f..f6213a10de3a 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -840,6 +840,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
 	return true;
 }
 
+static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+					  bool enc)
+{
+	/*
+	 * Only handle shared->private conversion here.
+	 * See the comment in tdx_early_init().
+	 */
+	if (enc)
+		return tdx_enc_status_changed(vaddr, numpages, enc);
+	return true;
+}
+
+static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+					 bool enc)
+{
+	/*
+	 * Only handle private->shared conversion here.
+	 * See the comment in tdx_early_init().
+	 */
+	if (!enc)
+		return tdx_enc_status_changed(vaddr, numpages, enc);
+	return true;
+}
+
 void __init tdx_early_init(void)
 {
 	u64 cc_mask;
@@ -867,9 +891,35 @@ void __init tdx_early_init(void)
 	 */
 	physical_mask &= cc_mask - 1;
 
-	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
-	x86_platform.guest.enc_tlb_flush_required   = tdx_tlb_flush_required;
-	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;
+	/*
+	 * Touching a privately mapped GPA that is not properly converted to
+	 * private with MapGPA and accepted leads to an unrecoverable exit
+	 * to the VMM.
+	 *
+	 * load_unaligned_zeropad() can touch memory that is not owned by
+	 * the caller, but just happens to be next after the owned memory.
+	 * This load_unaligned_zeropad() behavior makes it important when
+	 * the kernel asks the VMM to convert a GPA from shared to private
+	 * or back. The kernel must never have a page mapped into the direct
+	 * mapping (and aliases) as private when the GPA is already converted
+	 * to shared or when the GPA is not yet converted to private.
+	 *
+	 * guest.enc_status_change_prepare() is called before adjusting the
+	 * direct mapping and is therefore responsible for converting the
+	 * memory to private.
+	 *
+	 * guest.enc_status_change_finish() is called after adjusting the
+	 * direct mapping and converts the memory to shared.
+	 *
+	 * It is okay to have a shared mapping of memory that is not properly
+	 * converted: handle_mmio() knows how to deal with
+	 * load_unaligned_zeropad() stepping on it.
+	 */
+	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
+	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
+
+	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
+	x86_platform.guest.enc_tlb_flush_required   = tdx_tlb_flush_required;
 
 	pr_info("Guest detected\n");
 }
-- 
2.39.3
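[The hazard is easiest to see at a page boundary: load_unaligned_zeropad()
issues a word-sized load that begins inside the caller's buffer but may
spill into the following page. Normally a fault on that next page is
fixed up by zero-padding the result; in a TDX guest, however, if that
next page is mapped private in the direct map while its GPA is still
shared or not yet accepted, the access is an unrecoverable exit. An
illustrative sketch of such a stray access; some_page below stands for
an arbitrary directly mapped page and is not from the patch:

	/*
	 * Illustration only: a word-at-a-time read near the end of a page.
	 * The word-sized load starting at 'p' crosses into the next page,
	 * which the caller does not own.
	 */
	char *buf = page_address(some_page);	/* some_page: hypothetical */
	char *p = buf + PAGE_SIZE - 2;		/* 2 bytes before the boundary */
	unsigned long v = load_unaligned_zeropad(p);	/* touches the next page */

The ordering established above keeps that window closed: prepare
converts the GPA to private before the direct mapping flips to private,
and finish converts it to shared only after the direct mapping has
already flipped to shared, so a stray load can at worst hit a
shared-mapped page, which handle_mmio() tolerates.]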
Shutemov" To: dave.hansen@intel.com, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de Cc: decui@microsoft.com, rick.p.edgecombe@intel.com, sathyanarayanan.kuppuswamy@linux.intel.com, seanjc@google.com, thomas.lendacky@amd.com, x86@kernel.org, linux-kernel@vger.kernel.org, "Kirill A. Shutemov" Subject: [PATCHv3 3/3] x86/mm: Fix enc_status_change_finish_noop() Date: Tue, 6 Jun 2023 12:56:22 +0300 Message-Id: <20230606095622.1939-4-kirill.shutemov@linux.intel.com> X-Mailer: git-send-email 2.39.3 In-Reply-To: <20230606095622.1939-1-kirill.shutemov@linux.intel.com> References: <20230606095622.1939-1-kirill.shutemov@linux.intel.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" enc_status_change_finish_noop() is now defined as always-fail, which doesn't make sense for noop. The change has no user-visible effect because it is only called if the platform has CC_ATTR_MEM_ENCRYPT. All platforms with the attribute override the callback with their own implementation. Signed-off-by: Kirill A. Shutemov Reviewed-by: Kuppuswamy Sathyanarayanan --- arch/x86/kernel/x86_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index f230d4d7d8eb..64664311ac2b 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -131,7 +131,7 @@ struct x86_cpuinit_ops x86_cpuinit =3D { static void default_nmi_init(void) { }; =20 static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages= , bool enc) { return true; } -static bool enc_status_change_finish_noop(unsigned long vaddr, int npages,= bool enc) { return false; } +static bool enc_status_change_finish_noop(unsigned long vaddr, int npages,= bool enc) { return true; } static bool enc_tlb_flush_required_noop(bool enc) { return false; } static bool enc_cache_flush_required_noop(void) { return false; } static bool is_private_mmio_noop(u64 addr) {return false; } --=20 2.39.3