From: Jeremi Piotrowski
To: linux-kernel@vger.kernel.org
Cc: Jeremi Piotrowski, Wei Liu, Dexuan Cui, Tianyu Lan, Michael Kelley,
    Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
    x86@kernel.org, linux-hyperv@vger.kernel.org, Brijesh Singh,
    Michael Roth, Ashish Kalra, Tom Lendacky
Subject: [RFC PATCH v2 3/7] x86/sev: Maintain shadow rmptable on Hyper-V
Date: Mon, 13 Feb 2023 10:33:58 +0000
Message-Id: <20230213103402.1189285-4-jpiotrowski@linux.microsoft.com>
In-Reply-To: <20230213103402.1189285-1-jpiotrowski@linux.microsoft.com>
References: <20230213103402.1189285-1-jpiotrowski@linux.microsoft.com>

Hyper-V can expose the SEV-SNP feature to guests, and it manages the
system-wide RMP (Reverse Map) table. The SNP implementation in the
kernel needs access to the rmptable to track pages and to decide when
and how to issue rmpupdate/psmash. When running as a Hyper-V guest with
SNP support, the kernel therefore allocates its own rmptable during
boot, and that table must be kept in sync with every issued
rmpupdate/psmash instruction.

The logic for updating the rmptable comes from the "AMD64 Architecture
Programmer's Manual, Volume 3", which describes the psmash and
rmpupdate instructions. For correctness of the SNP host code, the most
important fields are "assigned" and "page size".
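Each rmptable entry is a 16-byte structure. As a reference for the two
fields named above, here is a minimal sketch of the entry layout this
series relies on; the field names match the info bits touched in the
patch, while the exact bit widths follow the APM's RMP entry
description and are an assumption here, not part of this patch:

/* Sketch of one RMP entry; bit widths per the APM, assumed for illustration. */
struct rmpentry {
	union {
		struct {
			u64 assigned	: 1;	/* page assigned to a guest or firmware */
			u64 pagesize	: 1;	/* 0 = 4K, 1 = 2M */
			u64 immutable	: 1;
			u64 rsvd1	: 9;
			u64 gpa		: 39;	/* guest physical address bits */
			u64 asid	: 10;
			u64 vmsa	: 1;
			u64 validated	: 1;
			u64 rsvd2	: 1;
		} info;
		u64 low;
	};
	u64 high;
} __packed;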
Signed-off-by: Jeremi Piotrowski
---
 arch/x86/include/asm/sev.h     |  4 ++
 arch/x86/kernel/cpu/mshyperv.c |  2 +
 arch/x86/kernel/sev.c          | 69 ++++++++++++++++++++++++++++++++++
 3 files changed, 75 insertions(+)

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index db5438663229..4d3591ebff5d 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -218,6 +218,8 @@ int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
 void sev_dump_rmpentry(u64 pfn);
+bool snp_soft_rmptable(void);
+void __init snp_set_soft_rmptable(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -251,6 +253,8 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int as
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 static inline void sev_dump_rmpentry(u64 pfn) {}
+static inline bool snp_soft_rmptable(void) { return false; }
+static inline void __init snp_set_soft_rmptable(void) {}
 #endif
 
 #endif
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 777c9d812dfa..101c38e9cae7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -530,6 +530,8 @@ static void __init ms_hyperv_init_mem_mapping(void)
 	wrmsrl(MSR_AMD64_RMP_BASE, rmp_res.start);
 	wrmsrl(MSR_AMD64_RMP_END, rmp_res.end);
 	insert_resource(&iomem_resource, &rmp_res);
+
+	snp_set_soft_rmptable();
 }
 
 const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index ad09dd3747a1..712f1a9623ce 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -2566,6 +2566,22 @@ int snp_lookup_rmpentry(u64 pfn, int *level)
 }
 EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
 
+static bool soft_rmptable __ro_after_init;
+
+/*
+ * Test if the rmptable needs to be managed by software and is not maintained by
+ * (virtualized) hardware.
+ */
+bool snp_soft_rmptable(void)
+{
+	return soft_rmptable;
+}
+
+void __init snp_set_soft_rmptable(void)
+{
+	soft_rmptable = true;
+}
+
 static bool virt_snp_msr(void)
 {
 	return boot_cpu_has(X86_FEATURE_NESTED_VIRT_SNP_MSR);
@@ -2592,6 +2608,26 @@ static u64 virt_psmash(u64 paddr)
 	return ret;
 }
 
+static void snp_update_rmptable_psmash(u64 pfn)
+{
+	int level;
+	struct rmpentry *entry = __snp_lookup_rmpentry(pfn, &level);
+
+	if (WARN_ON(IS_ERR_OR_NULL(entry)))
+		return;
+
+	if (level == PG_LEVEL_2M) {
+		int i;
+
+		entry->info.pagesize = RMP_PG_SIZE_4K;
+		for (i = 1; i < PTRS_PER_PMD; i++) {
+			struct rmpentry *it = &entry[i];
+			*it = *entry;
+			it->info.gpa = entry->info.gpa + i * PAGE_SIZE;
+		}
+	}
+}
+
 /*
  * psmash is used to smash a 2MB aligned page into 4K
  * pages while preserving the Validated bit in the RMP.
@@ -2609,6 +2645,8 @@ int psmash(u64 pfn)
 
 	if (virt_snp_msr()) {
 		ret = virt_psmash(paddr);
+		if (!ret && snp_soft_rmptable())
+			snp_update_rmptable_psmash(pfn);
 	} else {
 		/* Binutils version 2.36 supports the PSMASH mnemonic. */
 		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
@@ -2656,6 +2694,35 @@ static u64 virt_rmpupdate(unsigned long paddr, struct rmp_state *val)
 	return ret;
 }
 
+static void snp_update_rmptable_rmpupdate(u64 pfn, int level, struct rmp_state *val)
+{
+	int prev_level;
+	struct rmpentry *entry = __snp_lookup_rmpentry(pfn, &prev_level);
+
+	if (WARN_ON(IS_ERR_OR_NULL(entry)))
+		return;
+
+	if (level > PG_LEVEL_4K) {
+		int i;
+		struct rmpentry tmp_rmp = {
+			.info = {
+				.assigned = val->assigned,
+			},
+		};
+		for (i = 1; i < PTRS_PER_PMD; i++)
+			entry[i] = tmp_rmp;
+	}
+	if (!val->assigned) {
+		memset(entry, 0, sizeof(*entry));
+	} else {
+		entry->info.assigned = val->assigned;
+		entry->info.pagesize = val->pagesize;
+		entry->info.immutable = val->immutable;
+		entry->info.gpa = val->gpa;
+		entry->info.asid = val->asid;
+	}
+}
+
 static int rmpupdate(u64 pfn, struct rmp_state *val)
 {
 	unsigned long paddr = pfn << PAGE_SHIFT;
@@ -2684,6 +2751,8 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
 
 	if (virt_snp_msr()) {
 		ret = virt_rmpupdate(paddr, val);
+		if (!ret && snp_soft_rmptable())
+			snp_update_rmptable_rmpupdate(pfn, level, val);
 	} else {
 		/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
 		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
-- 
2.25.1
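
As a sanity check of the psmash bookkeeping above: smashing an assigned
2MB entry leaves 512 4K entries whose gpa fields step by 4K, with all
other fields copied from the head entry. A minimal userspace sketch of
that arithmetic, using a simplified stand-in type (hypothetical, not
the kernel's struct rmpentry):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096
#define PTRS_PER_PMD 512

/* Simplified stand-in for an RMP entry (illustrative only). */
struct entry {
	unsigned assigned : 1;
	unsigned pagesize : 1;	/* 0 = 4K, 1 = 2M */
	uint64_t gpa;		/* simplified: full byte address */
};

/* Mirrors the shape of snp_update_rmptable_psmash(): 2M -> 512 x 4K. */
static void psmash_shadow(struct entry *head)
{
	head->pagesize = 0;	/* head slot becomes the first 4K entry */
	for (int i = 1; i < PTRS_PER_PMD; i++) {
		head[i] = head[0];	/* copies assigned, pagesize, ... */
		head[i].gpa = head[0].gpa + (uint64_t)i * PAGE_SIZE;
	}
}

int main(void)
{
	static struct entry table[PTRS_PER_PMD];

	table[0] = (struct entry){ .assigned = 1, .pagesize = 1, .gpa = 0x200000 };
	psmash_shadow(table);

	/* Expect assigned=1, 4K page size, gpa = 0x200000 + 511 * 4K = 0x3ff000 */
	printf("entry[511]: assigned=%u pagesize=%u gpa=%#llx\n",
	       table[511].assigned, table[511].pagesize,
	       (unsigned long long)table[511].gpa);
	return 0;
}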