From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 01/25] KVM: x86/mmu: avoid indirect call for get_cr3
Date: Mon, 21 Feb 2022 11:22:19 -0500
Message-Id: <20220221162243.683208-2-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>

Most of the time, calls to get_guest_pgd result in calling kvm_read_cr3
(the only exception is nested TDP).  Check whether that is the case when
retpolines are enabled, thus avoiding an expensive indirect call.
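As a minimal, self-contained sketch of the pattern (hypothetical names, not
the KVM code itself): when retpolines make indirect calls expensive, compare
the function pointer against its overwhelmingly common target and call that
target directly, falling back to the indirect call otherwise.

#include <stdio.h>

typedef unsigned long (*get_pgd_fn)(void *ctx);

/* Stands in for kvm_read_cr3(): the target hit in the vast majority of calls. */
static unsigned long common_get_pgd(void *ctx)
{
	return *(unsigned long *)ctx;
}

/* Devirtualize the common case; the uncommon case still goes indirect. */
static unsigned long call_get_pgd(get_pgd_fn fn, void *ctx)
{
#ifdef CONFIG_RETPOLINE
	if (fn == common_get_pgd)
		return common_get_pgd(ctx);	/* direct call, no retpoline cost */
#endif
	return fn(ctx);				/* generic indirect call */
}

int main(void)
{
	unsigned long cr3 = 0x1000;
	printf("%lx\n", call_get_pgd(common_get_pgd, &cr3));
	return 0;
}

The comparison itself is cheap and perfectly predictable, so the fast path
costs one compare-and-branch instead of a retpoline round trip.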
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 10 ++++++++++ arch/x86/kvm/mmu/mmu.c | 15 ++++++++------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/x86.c | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 1d0c1904d69a..6ee4436e46f1 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -116,6 +116,16 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *v= cpu) vcpu->arch.mmu->shadow_root_level); } =20 +extern unsigned long kvm_get_guest_cr3(struct kvm_vcpu *vcpu); +static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, s= truct kvm_mmu *mmu) +{ +#ifdef CONFIG_RETPOLINE + if (mmu->get_guest_pgd =3D=3D kvm_get_guest_cr3) + return kvm_read_cr3(vcpu); +#endif + return mmu->get_guest_pgd(vcpu); +} + struct kvm_page_fault { /* arguments to kvm_mmu_do_page_fault. */ const gpa_t addr; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b2c1c4eb6007..7051040e15b3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3435,7 +3435,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) unsigned i; int r; =20 - root_pgd =3D mmu->get_guest_pgd(vcpu); + root_pgd =3D kvm_mmu_get_guest_pgd(vcpu, mmu); root_gfn =3D root_pgd >> PAGE_SHIFT; =20 if (mmu_check_root(vcpu, root_gfn)) @@ -3854,12 +3854,13 @@ static void shadow_page_table_clear_flood(struct kv= m_vcpu *vcpu, gva_t addr) static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gp= a, gfn_t gfn) { + struct kvm_mmu *mmu =3D vcpu->arch.mmu; struct kvm_arch_async_pf arch; =20 arch.token =3D (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch.gfn =3D gfn; - arch.direct_map =3D vcpu->arch.mmu->direct_map; - arch.cr3 =3D vcpu->arch.mmu->get_guest_pgd(vcpu); + arch.direct_map =3D mmu->direct_map; + arch.cr3 =3D kvm_mmu_get_guest_pgd(vcpu, mmu); =20 return kvm_setup_async_pf(vcpu, cr2_or_gpa, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); @@ -4208,7 +4209,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new= _pgd) } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); =20 -static unsigned long get_cr3(struct kvm_vcpu *vcpu) +unsigned long kvm_get_guest_cr3(struct kvm_vcpu *vcpu) { return kvm_read_cr3(vcpu); } @@ -4767,7 +4768,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->invlpg =3D NULL; context->shadow_root_level =3D kvm_mmu_get_tdp_level(vcpu); context->direct_map =3D true; - context->get_guest_pgd =3D get_cr3; + context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; context->root_level =3D role_regs_to_root_level(®s); @@ -4942,7 +4943,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu) =20 kvm_init_shadow_mmu(vcpu, ®s); =20 - context->get_guest_pgd =3D get_cr3; + context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; } @@ -4974,7 +4975,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) return; =20 g_context->mmu_role.as_u64 =3D new_role.as_u64; - g_context->get_guest_pgd =3D get_cr3; + g_context->get_guest_pgd =3D kvm_get_guest_cr3; g_context->get_pdptr =3D kvm_pdptr_read; g_context->inject_page_fault =3D kvm_inject_page_fault; g_context->root_level =3D new_role.base.level; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 252c77805eb9..80b4b291002a 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -362,7 +362,7 @@ static int 
FNAME(walk_addr_generic)(struct guest_walker *walker,
 	trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
 	walker->level = mmu->root_level;
-	pte = mmu->get_guest_pgd(vcpu);
+	pte = kvm_mmu_get_guest_pgd(vcpu, mmu);
 	have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
 
 #if PTTYPE == 64
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6552360d8888..da33d3a88a8d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12190,7 +12190,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 		return;
 
 	if (!vcpu->arch.mmu->direct_map &&
-	    work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+	    work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
 		return;
 
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
-- 
2.31.1

From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 02/25] KVM: x86/mmu: nested EPT cannot be used in SMM
Date: Mon, 21 Feb 2022 11:22:20 -0500
Message-Id: <20220221162243.683208-3-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>
The role.base.smm flag is always zero when setting up shadow EPT, so do
not bother copying it over from vcpu->arch.root_mmu.

Reviewed-by: David Matlack
Signed-off-by: Paolo Bonzini
Reviewed-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7051040e15b3..4f9bbd02fb8b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4886,9 +4886,11 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 {
 	union kvm_mmu_role role = {0};
 
-	/* SMM flag is inherited from root_mmu */
-	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
-
+	/*
+	 * KVM does not support SMM transfer monitors, and consequently does not
+	 * support the "entry to SMM" control either.  role.base.smm is always 0.
+	 */
+	WARN_ON_ONCE(is_smm(vcpu));
 	role.base.level = level;
 	role.base.has_4_byte_gpte = false;
 	role.base.direct = false;
-- 
2.31.1

From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 03/25] KVM: x86/mmu: constify uses of struct kvm_mmu_role_regs
Date: Mon, 21 Feb 2022 11:22:21
-0500 Message-Id: <20220221162243.683208-4-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.15 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" struct kvm_mmu_role_regs is computed just once and then accessed. Use const to make this clearer, even though the const fields of struct kvm_mmu_role_regs already prevent modifications to the contents of the struct, or rather make them harder. Reviewed-by: David Matlack Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4f9bbd02fb8b..97566ac539e3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -197,7 +197,7 @@ struct kvm_mmu_role_regs { * the single source of truth for the MMU's state. */ #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \ -static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_rol= e_regs *regs)\ +static inline bool __maybe_unused ____is_##reg##_##name(const struct kvm_m= mu_role_regs *regs)\ { \ return !!(regs->reg & flag); \ } @@ -244,7 +244,7 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struc= t kvm_vcpu *vcpu) return regs; } =20 -static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs) +static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs) { if (!____is_cr0_pg(regs)) return 0; @@ -4681,7 +4681,7 @@ static void paging32_init_context(struct kvm_mmu *con= text) } =20 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *= vcpu, - struct kvm_mmu_role_regs *regs) + const struct kvm_mmu_role_regs *regs) { union kvm_mmu_extended_role ext =3D {0}; =20 @@ -4704,7 +4704,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_= ext(struct kvm_vcpu *vcpu, } =20 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs, + const struct kvm_mmu_role_regs *regs, bool base_only) { union kvm_mmu_role role =3D {0}; @@ -4740,7 +4740,8 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vc= pu *vcpu) =20 static union kvm_mmu_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs, bool base_only) + const struct kvm_mmu_role_regs *regs, + bool base_only) { union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs, base_onl= y); =20 @@ -4786,7 +4787,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) =20 static union kvm_mmu_role kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs, bool base_only) + const struct kvm_mmu_role_regs *regs, + bool base_only) { union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs, base_onl= y); =20 @@ -4799,7 +4801,8 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu= *vcpu, =20 static union kvm_mmu_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs, bool base_only) + const struct kvm_mmu_role_regs *regs, + bool base_only) { union kvm_mmu_role role =3D kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only); @@ -4817,7 +4820,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *v= cpu, } =20 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, - 
struct kvm_mmu_role_regs *regs, + const struct kvm_mmu_role_regs *regs, union kvm_mmu_role new_role) { if (new_role.as_u64 =3D=3D context->mmu_role.as_u64) @@ -4840,7 +4843,7 @@ static void shadow_mmu_init_context(struct kvm_vcpu *= vcpu, struct kvm_mmu *conte } =20 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs) + const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role new_role =3D @@ -4851,7 +4854,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, =20 static union kvm_mmu_role kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu, - struct kvm_mmu_role_regs *regs) + const struct kvm_mmu_role_regs *regs) { union kvm_mmu_role role =3D kvm_calc_shadow_root_page_role_common(vcpu, regs, false); @@ -4951,7 +4954,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu) } =20 static union kvm_mmu_role -kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *= regs) +kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_= regs *regs) { union kvm_mmu_role role; =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 73208C433F5 for ; Mon, 21 Feb 2022 16:23:23 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380332AbiBUQXo (ORCPT ); Mon, 21 Feb 2022 11:23:44 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53404 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380194AbiBUQXP (ORCPT ); Mon, 21 Feb 2022 11:23:15 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 5318D275C2 for ; Mon, 21 Feb 2022 08:22:51 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460570; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=FTGaq+J16vxVuhexbjLIY//LQWKKBD2mHulNQ/IMUkQ=; b=SzYcdwtDxpiwZPef4CseygzguRdMCD5lHdfgY0B/LkjJXRSTATRvKAoOXRz8BdRO+hiBDt I7KrBjOIlHf0VM3WKpJmlK8j1N1jkb3iAB8qIpAcvehMxRIZzTxLAqDHWjczf8vc/Jsvhz DtPlJXndvE6nhyTfGpZPaouWPwGnulw= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-613-1bNqFLkCMx6UFzMmAR0Gkw-1; Mon, 21 Feb 2022 11:22:47 -0500 X-MC-Unique: 1bNqFLkCMx6UFzMmAR0Gkw-1 Received: from smtp.corp.redhat.com (int-mx05.intmail.prod.int.phx2.redhat.com [10.5.11.15]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id E3F5C2F4A; Mon, 21 Feb 2022 16:22:45 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 86AB577468; Mon, 21 Feb 2022 16:22:45 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 04/25] KVM: x86/mmu: pull computation of kvm_mmu_role_regs to 
kvm_init_mmu Date: Mon, 21 Feb 2022 11:22:22 -0500 Message-Id: <20220221162243.683208-5-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.15 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" The init_kvm_*mmu functions, with the exception of shadow NPT, do not need to know the full values of CR0/CR4/EFER; they only need to know the bits that make up the "role". This cleanup however will take quite a few incremental steps. As a start, pull the common computation of the struct kvm_mmu_role_regs into their caller: all of them extract the struct from the vcpu as the very first step. Reviewed-by: David Matlack Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 97566ac539e3..0e393506f4df 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4753,12 +4753,12 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vc= pu, return role; } =20 -static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, + const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - struct kvm_mmu_role_regs regs =3D vcpu_to_role_regs(vcpu); union kvm_mmu_role new_role =3D - kvm_calc_tdp_mmu_root_page_role(vcpu, ®s, false); + kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false); =20 if (new_role.as_u64 =3D=3D context->mmu_role.as_u64) return; @@ -4772,7 +4772,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; - context->root_level =3D role_regs_to_root_level(®s); + context->root_level =3D role_regs_to_root_level(regs); =20 if (!is_cr0_pg(context)) context->gva_to_gpa =3D nonpaging_gva_to_gpa; @@ -4941,12 +4941,12 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu,= bool execonly, } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); =20 -static void init_kvm_softmmu(struct kvm_vcpu *vcpu) +static void init_kvm_softmmu(struct kvm_vcpu *vcpu, + const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - struct kvm_mmu_role_regs regs =3D vcpu_to_role_regs(vcpu); =20 - kvm_init_shadow_mmu(vcpu, ®s); + kvm_init_shadow_mmu(vcpu, regs); =20 context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; @@ -4970,10 +4970,9 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, cons= t struct kvm_mmu_role_regs * return role; } =20 -static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) +static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mm= u_role_regs *regs) { - struct kvm_mmu_role_regs regs =3D vcpu_to_role_regs(vcpu); - union kvm_mmu_role new_role =3D kvm_calc_nested_mmu_role(vcpu, ®s); + union kvm_mmu_role new_role =3D kvm_calc_nested_mmu_role(vcpu, regs); struct kvm_mmu *g_context =3D &vcpu->arch.nested_mmu; =20 if (new_role.as_u64 =3D=3D g_context->mmu_role.as_u64) @@ -5013,12 +5012,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vc= pu) =20 void kvm_init_mmu(struct kvm_vcpu *vcpu) { + struct kvm_mmu_role_regs regs =3D vcpu_to_role_regs(vcpu); + if (mmu_is_nested(vcpu)) - init_kvm_nested_mmu(vcpu); + 
init_kvm_nested_mmu(vcpu, &regs);
 	else if (tdp_enabled)
-		init_kvm_tdp_mmu(vcpu);
+		init_kvm_tdp_mmu(vcpu, &regs);
 	else
-		init_kvm_softmmu(vcpu);
+		init_kvm_softmmu(vcpu, &regs);
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
-- 
2.31.1

From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 05/25] KVM: x86/mmu: rephrase unclear comment
Date: Mon, 21 Feb 2022 11:22:23 -0500
Message-Id: <20220221162243.683208-6-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>

If accessed bits are not supported, there simply isn't any distinction
between accessed and non-accessed gPTEs, so the comment does not make much
sense.  Rephrase it in terms of what happens if accessed bits *are*
supported.
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 80b4b291002a..d1d17d28e81b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -193,7 +193,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 	if (!FNAME(is_present_gpte)(gpte))
 		goto no_present;
 
-	/* if accessed bit is not supported prefetch non accessed gpte */
+	/* if accessed bit is supported, prefetch only accessed gpte */
 	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
 	    !(gpte & PT_GUEST_ACCESSED_MASK))
 		goto no_present;
-- 
2.31.1

From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 06/25] KVM: nVMX/nSVM: do not monkey-patch inject_page_fault callback
Date: Mon, 21 Feb 2022 11:22:24 -0500
Message-Id: <20220221162243.683208-7-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>
Content-Type: text/plain;
charset="utf-8" Currently, vendor code is patching the inject_page_fault and later, on vmexit, expecting kvm_init_mmu to restore the inject_page_fault callback. This is brittle, as exposed by the fact that SVM KVM_SET_NESTED_STATE forgets to do it. Instead, do the check at the time a page fault actually has to be injected. This does incur the cost of an extra retpoline for nested vmexits when TDP is disabled, but is overall much cleaner. While at it, add a comment that explains why the different behavior is needed in this case. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/svm/nested.c | 4 +--- arch/x86/kvm/vmx/nested.c | 4 +--- arch/x86/kvm/x86.c | 17 +++++++++++++++++ 5 files changed, 23 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 713e08f62385..92855d3984a7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1508,6 +1508,8 @@ struct kvm_x86_nested_ops { int (*enable_evmcs)(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); + void (*inject_page_fault)(struct kvm_vcpu *vcpu, + struct x86_exception *fault); }; =20 struct kvm_x86_init_ops { @@ -1747,6 +1749,7 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, uns= igned nr, unsigned long pay void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error= _code); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fa= ult); +void kvm_inject_page_fault_shadow(struct kvm_vcpu *vcpu, struct x86_except= ion *fault); bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0e393506f4df..f3494dcc4e2f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4950,7 +4950,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu, =20 context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; - context->inject_page_fault =3D kvm_inject_page_fault; + context->inject_page_fault =3D kvm_inject_page_fault_shadow; } =20 static union kvm_mmu_role diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 96bab464967f..ff58c9ebc552 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -680,9 +680,6 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmc= b12_gpa, if (ret) return ret; =20 - if (!npt_enabled) - vcpu->arch.mmu->inject_page_fault =3D svm_inject_page_fault_nested; - if (!from_vmrun) kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); =20 @@ -1571,4 +1568,5 @@ struct kvm_x86_nested_ops svm_nested_ops =3D { .get_nested_state_pages =3D svm_get_nested_state_pages, .get_state =3D svm_get_nested_state, .set_state =3D svm_set_nested_state, + .inject_page_fault =3D svm_inject_page_fault_nested, }; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 1dfe23963a9e..564c60566da7 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2615,9 +2615,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, stru= ct vmcs12 *vmcs12, vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } =20 - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault =3D vmx_inject_page_fault_nested; - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
 				     vmcs12->guest_ia32_perf_global_ctrl))) {
@@ -6807,4 +6804,5 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.write_log_dirty = nested_vmx_write_pml_buffer,
 	.enable_evmcs = nested_enable_evmcs,
 	.get_evmcs_version = nested_get_evmcs_version,
+	.inject_page_fault = vmx_inject_page_fault_nested,
 };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index da33d3a88a8d..1546a25a9307 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -746,6 +746,23 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
+void kvm_inject_page_fault_shadow(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault)
+{
+	/*
+	 * The core exception injection code is not able to combine
+	 * an exception with a vmexit; if a page fault happens while
+	 * a page fault exception is being delivered, the original
+	 * page fault would be changed incorrectly into a double
+	 * fault.  To work around this, #PF vmexits are injected
+	 * without going through kvm_queue_exception.
+	 */
+	if (unlikely(is_guest_mode(vcpu)))
+		kvm_x86_ops.nested_ops->inject_page_fault(vcpu, fault);
+	else
+		kvm_inject_page_fault(vcpu, fault);
+}
+
 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 				    struct x86_exception *fault)
 {
-- 
2.31.1
2022 16:22:47 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 07/25] KVM: x86/mmu: remove "bool base_only" arguments Date: Mon, 21 Feb 2022 11:22:25 -0500 Message-Id: <20220221162243.683208-8-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.15 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" The argument is always false now that kvm_mmu_calc_root_page_role has been removed. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 66 +++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 43 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f3494dcc4e2f..7c835253a330 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4680,47 +4680,30 @@ static void paging32_init_context(struct kvm_mmu *c= ontext) context->direct_map =3D false; } =20 -static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *= vcpu, - const struct kvm_mmu_role_regs *regs) -{ - union kvm_mmu_extended_role ext =3D {0}; - - if (____is_cr0_pg(regs)) { - ext.cr0_pg =3D 1; - ext.cr4_pae =3D ____is_cr4_pae(regs); - ext.cr4_smep =3D ____is_cr4_smep(regs); - ext.cr4_smap =3D ____is_cr4_smap(regs); - ext.cr4_pse =3D ____is_cr4_pse(regs); - - /* PKEY and LA57 are active iff long mode is active. */ - ext.cr4_pke =3D ____is_efer_lma(regs) && ____is_cr4_pke(regs); - ext.cr4_la57 =3D ____is_efer_lma(regs) && ____is_cr4_la57(regs); - ext.efer_lma =3D ____is_efer_lma(regs); - } - - ext.valid =3D 1; - - return ext; -} - static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs, - bool base_only) + const struct kvm_mmu_role_regs *regs) { union kvm_mmu_role role =3D {0}; =20 role.base.access =3D ACC_ALL; if (____is_cr0_pg(regs)) { + role.ext.cr0_pg =3D 1; role.base.efer_nx =3D ____is_efer_nx(regs); role.base.cr0_wp =3D ____is_cr0_wp(regs); + + role.ext.cr4_pae =3D ____is_cr4_pae(regs); + role.ext.cr4_smep =3D ____is_cr4_smep(regs); + role.ext.cr4_smap =3D ____is_cr4_smap(regs); + role.ext.cr4_pse =3D ____is_cr4_pse(regs); + + /* PKEY and LA57 are active iff long mode is active. 
*/ + role.ext.cr4_pke =3D ____is_efer_lma(regs) && ____is_cr4_pke(regs); + role.ext.cr4_la57 =3D ____is_efer_lma(regs) && ____is_cr4_la57(regs); + role.ext.efer_lma =3D ____is_efer_lma(regs); } role.base.smm =3D is_smm(vcpu); role.base.guest_mode =3D is_guest_mode(vcpu); - - if (base_only) - return role; - - role.ext =3D kvm_calc_mmu_role_ext(vcpu, regs); + role.ext.valid =3D 1; =20 return role; } @@ -4740,10 +4723,9 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_v= cpu *vcpu) =20 static union kvm_mmu_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs, - bool base_only) + const struct kvm_mmu_role_regs *regs) { - union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs, base_onl= y); + union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs); =20 role.base.ad_disabled =3D (shadow_accessed_mask =3D=3D 0); role.base.level =3D kvm_mmu_get_tdp_level(vcpu); @@ -4758,7 +4740,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role new_role =3D - kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false); + kvm_calc_tdp_mmu_root_page_role(vcpu, regs); =20 if (new_role.as_u64 =3D=3D context->mmu_role.as_u64) return; @@ -4787,10 +4769,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, =20 static union kvm_mmu_role kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs, - bool base_only) + const struct kvm_mmu_role_regs *regs) { - union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs, base_onl= y); + union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs); =20 role.base.smep_andnot_wp =3D role.ext.cr4_smep && !____is_cr0_wp(regs); role.base.smap_andnot_wp =3D role.ext.cr4_smap && !____is_cr0_wp(regs); @@ -4801,11 +4782,10 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vc= pu *vcpu, =20 static union kvm_mmu_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs, - bool base_only) + const struct kvm_mmu_role_regs *regs) { union kvm_mmu_role role =3D - kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only); + kvm_calc_shadow_root_page_role_common(vcpu, regs); =20 role.base.direct =3D !____is_cr0_pg(regs); =20 @@ -4847,7 +4827,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role new_role =3D - kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false); + kvm_calc_shadow_mmu_root_page_role(vcpu, regs); =20 shadow_mmu_init_context(vcpu, context, regs, new_role); } @@ -4857,7 +4837,7 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *v= cpu, const struct kvm_mmu_role_regs *regs) { union kvm_mmu_role role =3D - kvm_calc_shadow_root_page_role_common(vcpu, regs, false); + kvm_calc_shadow_root_page_role_common(vcpu, regs); =20 role.base.direct =3D false; role.base.level =3D kvm_mmu_get_tdp_level(vcpu); @@ -4958,7 +4938,7 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const= struct kvm_mmu_role_regs * { union kvm_mmu_role role; =20 - role =3D kvm_calc_shadow_root_page_role_common(vcpu, regs, false); + role =3D kvm_calc_shadow_root_page_role_common(vcpu, regs); =20 /* * Nested MMUs are used only for walking L2's gva->gpa, they never have --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by 
From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 08/25] KVM: x86/mmu: split cpu_mode from mmu_role
Date: Mon, 21 Feb 2022 11:22:26 -0500
Message-Id: <20220221162243.683208-9-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>

Snapshot the state of the processor registers that govern the page walk
into a new field of struct kvm_mmu.  This is a more natural representation
than having it *mostly* in mmu_role but not exclusively; the delta right
now is represented in other fields, such as root_level.

The nested MMU now has only the CPU mode; in fact the new function
kvm_calc_cpu_mode is analogous to the previous kvm_calc_nested_mmu_role,
except that it has role.base.direct equal to !CR0.PG.  It is not clear
what the old code meant by "setting role.base.direct to true to detect
bogus usage of the nested MMU".
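As a rough standalone illustration of the idea (hypothetical structure and
names, not the actual KVM types): one cached word describes the guest CPU
paging mode that the page-table walker needs, the other describes the
shadow MMU configuration, and reinitialization is skipped only when
neither has changed.

struct mmu_ctx {
	unsigned long cpu_mode;	/* guest CR0/CR4/EFER-derived walk mode */
	unsigned long mmu_role;	/* shadow page-table configuration */
};

static void mmu_reinit(struct mmu_ctx *ctx, unsigned long cpu_mode,
		       unsigned long mmu_role)
{
	if (cpu_mode == ctx->cpu_mode && mmu_role == ctx->mmu_role)
		return;			/* nothing changed, keep current state */

	ctx->cpu_mode = cpu_mode;
	ctx->mmu_role = mmu_role;
	/* ...rebuild walker callbacks and shadow paging state here... */
}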
Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu/mmu.c | 107 ++++++++++++++++++++------------ arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 3 files changed, 68 insertions(+), 42 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 92855d3984a7..cc268116eb3f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -433,6 +433,7 @@ struct kvm_mmu { struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); struct kvm_mmu_root_info root; + union kvm_mmu_role cpu_mode; union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7c835253a330..1af898f0cf87 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -221,7 +221,7 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA); #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \ static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \ { \ - return !!(mmu->mmu_role. base_or_ext . reg##_##name); \ + return !!(mmu->cpu_mode. base_or_ext . reg##_##name); \ } BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg); BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp); @@ -4680,6 +4680,39 @@ static void paging32_init_context(struct kvm_mmu *co= ntext) context->direct_map =3D false; } =20 +static union kvm_mmu_role +kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *r= egs) +{ + union kvm_mmu_role role =3D {0}; + + role.base.access =3D ACC_ALL; + role.base.smm =3D is_smm(vcpu); + role.base.guest_mode =3D is_guest_mode(vcpu); + role.base.direct =3D !____is_cr0_pg(regs); + if (!role.base.direct) { + role.base.efer_nx =3D ____is_efer_nx(regs); + role.base.cr0_wp =3D ____is_cr0_wp(regs); + role.base.smep_andnot_wp =3D ____is_cr4_smep(regs) && !____is_cr0_wp(reg= s); + role.base.smap_andnot_wp =3D ____is_cr4_smap(regs) && !____is_cr0_wp(reg= s); + role.base.has_4_byte_gpte =3D !____is_cr4_pae(regs); + role.base.level =3D role_regs_to_root_level(regs); + + role.ext.cr0_pg =3D 1; + role.ext.cr4_pae =3D ____is_cr4_pae(regs); + role.ext.cr4_smep =3D ____is_cr4_smep(regs); + role.ext.cr4_smap =3D ____is_cr4_smap(regs); + role.ext.cr4_pse =3D ____is_cr4_pse(regs); + + /* PKEY and LA57 are active iff long mode is active. 
*/ + role.ext.cr4_pke =3D ____is_efer_lma(regs) && ____is_cr4_pke(regs); + role.ext.cr4_la57 =3D ____is_efer_lma(regs) && ____is_cr4_la57(regs); + role.ext.efer_lma =3D ____is_efer_lma(regs); + } + + role.ext.valid =3D 1; + return role; +} + static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { @@ -4739,13 +4772,16 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_role new_role =3D + union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); + union kvm_mmu_role mmu_role =3D kvm_calc_tdp_mmu_root_page_role(vcpu, regs); =20 - if (new_role.as_u64 =3D=3D context->mmu_role.as_u64) + if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && + mmu_role.as_u64 =3D=3D context->mmu_role.as_u64) return; =20 - context->mmu_role.as_u64 =3D new_role.as_u64; + context->cpu_mode.as_u64 =3D cpu_mode.as_u64; + context->mmu_role.as_u64 =3D mmu_role.as_u64; context->page_fault =3D kvm_tdp_page_fault; context->sync_page =3D nonpaging_sync_page; context->invlpg =3D NULL; @@ -4800,13 +4836,15 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu = *vcpu, } =20 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, - const struct kvm_mmu_role_regs *regs, - union kvm_mmu_role new_role) + union kvm_mmu_role cpu_mode, + union kvm_mmu_role mmu_role) { - if (new_role.as_u64 =3D=3D context->mmu_role.as_u64) + if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && + mmu_role.as_u64 =3D=3D context->mmu_role.as_u64) return; =20 - context->mmu_role.as_u64 =3D new_role.as_u64; + context->cpu_mode.as_u64 =3D cpu_mode.as_u64; + context->mmu_role.as_u64 =3D mmu_role.as_u64; =20 if (!is_cr0_pg(context)) nonpaging_init_context(context); @@ -4814,10 +4852,10 @@ static void shadow_mmu_init_context(struct kvm_vcpu= *vcpu, struct kvm_mmu *conte paging64_init_context(context); else paging32_init_context(context); - context->root_level =3D role_regs_to_root_level(regs); + context->root_level =3D cpu_mode.base.level; =20 reset_guest_paging_metadata(vcpu, context); - context->shadow_root_level =3D new_role.base.level; + context->shadow_root_level =3D mmu_role.base.level; =20 reset_shadow_zero_bits_mask(vcpu, context); } @@ -4826,10 +4864,11 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vc= pu, const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_role new_role =3D + union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); + union kvm_mmu_role mmu_role =3D kvm_calc_shadow_mmu_root_page_role(vcpu, regs); =20 - shadow_mmu_init_context(vcpu, context, regs, new_role); + shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); } =20 static union kvm_mmu_role @@ -4854,11 +4893,10 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu,= unsigned long cr0, .cr4 =3D cr4 & ~X86_CR4_PKE, .efer =3D efer, }; - union kvm_mmu_role new_role; + union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); + union kvm_mmu_role mmu_role =3D kvm_calc_shadow_npt_root_page_role(vcpu, = ®s);; =20 - new_role =3D kvm_calc_shadow_npt_root_page_role(vcpu, ®s); - - shadow_mmu_init_context(vcpu, context, ®s, new_role); + shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); kvm_mmu_new_pgd(vcpu, nested_cr3); } EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); @@ -4881,7 +4919,6 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *v= cpu, bool accessed_dirty, role.base.guest_mode =3D true; 
role.base.access =3D ACC_ALL; =20 - /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */ role.ext.word =3D 0; role.ext.execonly =3D execonly; role.ext.valid =3D 1; @@ -4895,12 +4932,14 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu,= bool execonly, { struct kvm_mmu *context =3D &vcpu->arch.guest_mmu; u8 level =3D vmx_eptp_page_walk_level(new_eptp); - union kvm_mmu_role new_role =3D + union kvm_mmu_role new_mode =3D kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, execonly, level); =20 - if (new_role.as_u64 !=3D context->mmu_role.as_u64) { - context->mmu_role.as_u64 =3D new_role.as_u64; + if (new_mode.as_u64 !=3D context->cpu_mode.as_u64) { + /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */ + context->cpu_mode.as_u64 =3D new_mode.as_u64; + context->mmu_role.as_u64 =3D new_mode.as_u64; =20 context->shadow_root_level =3D level; =20 @@ -4933,36 +4972,19 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu, context->inject_page_fault =3D kvm_inject_page_fault_shadow; } =20 -static union kvm_mmu_role -kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_= regs *regs) -{ - union kvm_mmu_role role; - - role =3D kvm_calc_shadow_root_page_role_common(vcpu, regs); - - /* - * Nested MMUs are used only for walking L2's gva->gpa, they never have - * shadow pages of their own and so "direct" has no meaning. Set it - * to "true" to try to detect bogus usage of the nested MMU. - */ - role.base.direct =3D true; - role.base.level =3D role_regs_to_root_level(regs); - return role; -} - static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mm= u_role_regs *regs) { - union kvm_mmu_role new_role =3D kvm_calc_nested_mmu_role(vcpu, regs); + union kvm_mmu_role new_mode =3D kvm_calc_cpu_mode(vcpu, regs); struct kvm_mmu *g_context =3D &vcpu->arch.nested_mmu; =20 - if (new_role.as_u64 =3D=3D g_context->mmu_role.as_u64) + if (new_mode.as_u64 =3D=3D g_context->cpu_mode.as_u64) return; =20 - g_context->mmu_role.as_u64 =3D new_role.as_u64; + g_context->cpu_mode.as_u64 =3D new_mode.as_u64; g_context->get_guest_pgd =3D kvm_get_guest_cr3; g_context->get_pdptr =3D kvm_pdptr_read; g_context->inject_page_fault =3D kvm_inject_page_fault; - g_context->root_level =3D new_role.base.level; + g_context->root_level =3D new_mode.base.level; =20 /* * L2 page tables are never shadowed, so there is no need to sync @@ -5020,6 +5042,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.root_mmu.mmu_role.ext.valid =3D 0; vcpu->arch.guest_mmu.mmu_role.ext.valid =3D 0; vcpu->arch.nested_mmu.mmu_role.ext.valid =3D 0; + vcpu->arch.root_mmu.cpu_mode.ext.valid =3D 0; + vcpu->arch.guest_mmu.cpu_mode.ext.valid =3D 0; + vcpu->arch.nested_mmu.cpu_mode.ext.valid =3D 0; kvm_mmu_reset_context(vcpu); =20 /* diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index d1d17d28e81b..e1c2ecb4ddee 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -323,7 +323,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *= mmu, * is not reserved and does not indicate a large page at this level, * so clear PT_PAGE_SIZE_MASK in gpte if that is the case. */ - gpte &=3D level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse); + gpte &=3D level - (PT32_ROOT_LEVEL + mmu->cpu_mode.ext.cr4_pse); #endif /* * PG_LEVEL_4K always terminates. 
The RHS has bit 7 set
-- 
2.31.1

From: Paolo Bonzini
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 09/25] KVM: x86/mmu: do not recompute root level from kvm_mmu_role_regs
Date: Mon, 21 Feb 2022 11:22:27 -0500
Message-Id: <20220221162243.683208-10-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>

The root_level can be found in the cpu_mode (in fact the field is
superfluous and could be removed, but one thing at a time).  Since there
is only one usage left of role_regs_to_root_level, inline it into
kvm_calc_cpu_mode.
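The mapping from paging-control bits to root level that is being inlined
can be summarized by a small standalone helper (hypothetical, for
illustration only; the values 5/4/3/2 correspond to PT64_ROOT_5LEVEL,
PT64_ROOT_4LEVEL, PT32E_ROOT_LEVEL and PT32_ROOT_LEVEL):

static int guest_root_level(bool cr0_pg, bool efer_lma, bool cr4_la57,
			    bool cr4_pae)
{
	if (!cr0_pg)
		return 0;			/* paging disabled */
	if (efer_lma)
		return cr4_la57 ? 5 : 4;	/* 5-level or 4-level long mode */
	if (cr4_pae)
		return 3;			/* PAE paging */
	return 2;				/* 32-bit 2-level paging */
}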
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 1af898f0cf87..6e539fc2c9c7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -244,19 +244,6 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(stru= ct kvm_vcpu *vcpu) return regs; } =20 -static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs) -{ - if (!____is_cr0_pg(regs)) - return 0; - else if (____is_efer_lma(regs)) - return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL : - PT64_ROOT_4LEVEL; - else if (____is_cr4_pae(regs)) - return PT32E_ROOT_LEVEL; - else - return PT32_ROOT_LEVEL; -} - static inline bool kvm_available_flush_tlb_with_range(void) { return kvm_x86_ops.tlb_remote_flush_with_range; @@ -4695,7 +4682,13 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struc= t kvm_mmu_role_regs *regs) role.base.smep_andnot_wp =3D ____is_cr4_smep(regs) && !____is_cr0_wp(reg= s); role.base.smap_andnot_wp =3D ____is_cr4_smap(regs) && !____is_cr0_wp(reg= s); role.base.has_4_byte_gpte =3D !____is_cr4_pae(regs); - role.base.level =3D role_regs_to_root_level(regs); + + if (____is_efer_lma(regs)) + role.base.level =3D ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL : PT64_ROO= T_4LEVEL; + else if (____is_cr4_pae(regs)) + role.base.level =3D PT32E_ROOT_LEVEL; + else + role.base.level =3D PT32_ROOT_LEVEL; =20 role.ext.cr0_pg =3D 1; role.ext.cr4_pae =3D ____is_cr4_pae(regs); @@ -4790,7 +4783,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; - context->root_level =3D role_regs_to_root_level(regs); + context->root_level =3D cpu_mode.base.level; =20 if (!is_cr0_pg(context)) context->gva_to_gpa =3D nonpaging_gva_to_gpa; --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8E578C433FE for ; Mon, 21 Feb 2022 16:23:34 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380362AbiBUQXz (ORCPT ); Mon, 21 Feb 2022 11:23:55 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53432 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380213AbiBUQXQ (ORCPT ); Mon, 21 Feb 2022 11:23:16 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id F04BE27154 for ; Mon, 21 Feb 2022 08:22:52 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460572; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=l1Gf8VtNAt2/Qxfp7otaTjlAdjKtIcomYYyTzHKIFnw=; b=BUAK80f4sF1qMkHCTrpZNQeEXJkCxYk/aW+RgspsYPH3bYoIuhHJg1oV3Upj7USt/qWiWE bexQsEWvUBQZR/QZxQcCXRHmFzXVLGQWDBGcWJ+3pTEXpSlAiGlTE9PHezmJUpW5JGmGPt hr1C8lzgqKuaNJ5MDphzRVs/YA5nduU= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 
us-mta-287-K4zjalEVNS6oGDkj65wlmw-1; Mon, 21 Feb 2022 11:22:50 -0500 X-MC-Unique: K4zjalEVNS6oGDkj65wlmw-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 755512F4B; Mon, 21 Feb 2022 16:22:49 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 16A5578AA5; Mon, 21 Feb 2022 16:22:49 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 10/25] KVM: x86/mmu: remove ept_ad field Date: Mon, 21 Feb 2022 11:22:28 -0500 Message-Id: <20220221162243.683208-11-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" The ept_ad field is used during page walk to determine if the guest PTEs have accessed and dirty bits. In the MMU role, the ad_disabled bit represents whether the *shadow* PTEs have the bits, so it would be incorrect to replace PT_HAVE_ACCESSED_DIRTY with just !mmu->mmu_role.base.ad_disabled. However, the similar field in the CPU mode, ad_disabled, is initialized correctly: to the opposite value of ept_ad for shadow EPT, and zero for non-EPT guest paging modes (which always have A/D bits). It is therefore possible to compute PT_HAVE_ACCESSED_DIRTY from the CPU mode, like other page-format fields; it just has to be inverted to account for the different polarity. Having a CPU mode that is distinct from the MMU roles in fact would even allow to remove PT_HAVE_ACCESSED_DIRTY macro altogether, and always use !mmu->cpu_mode.base.ad_disabled. 
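To make the polarity concrete (illustration only, not a hunk from this patch):

	/*
	 *  guest paging flavor                  old mmu->ept_ad   new cpu_mode.base.ad_disabled
	 *  shadow EPT, A/D bits enabled               1                       0
	 *  shadow EPT, A/D bits disabled              0                       1
	 *  non-EPT paging (A/D always present)       n/a                      0
	 *
	 * so "guest PTEs have accessed/dirty bits" is exactly
	 * !cpu_mode.base.ad_disabled, which is what PT_HAVE_ACCESSED_DIRTY
	 * is changed to below.
	 */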
I am not doing this because the macro has a small effect in terms of dead code elimination: text data bss dec hex 103544 16665 112 120321 1d601 # as of this patch 103746 16665 112 120523 1d6cb # without PT_HAVE_ACCESSED_DIRTY Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu/mmu.c | 1 - arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index cc268116eb3f..996cf9b14f5e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -437,7 +437,6 @@ struct kvm_mmu { union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; - u8 ept_ad; bool direct_map; struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS]; =20 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6e539fc2c9c7..3ffa6f2bf991 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4936,7 +4936,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, =20 context->shadow_root_level =3D level; =20 - context->ept_ad =3D accessed_dirty; context->page_fault =3D ept_page_fault; context->gva_to_gpa =3D ept_gva_to_gpa; context->sync_page =3D ept_sync_page; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index e1c2ecb4ddee..64b6f76641f0 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -64,7 +64,7 @@ #define PT_LEVEL_BITS PT64_LEVEL_BITS #define PT_GUEST_DIRTY_SHIFT 9 #define PT_GUEST_ACCESSED_SHIFT 8 - #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) + #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_mode.base.ad_disabled) #define CMPXCHG cmpxchg64 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL #else --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5D542C433EF for ; Mon, 21 Feb 2022 16:23:44 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380194AbiBUQYE (ORCPT ); Mon, 21 Feb 2022 11:24:04 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53488 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380225AbiBUQXS (ORCPT ); Mon, 21 Feb 2022 11:23:18 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 50AFA27179 for ; Mon, 21 Feb 2022 08:22:55 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460574; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=xTJnCkKdsdNvfz4Xp1aH2PLk7lwHUyThFuz2jZw0IQo=; b=cHu9/XZLTXlr3mRPxCQxNJ7BJIQXqheE0GuNmh+xzGR1WEJcPQJepkyqsN8ISSecRQGUEd ZmBBmQ6nYH4D+ofdn0orAz6mbbrPp5q6JtPClZZpY+B0GgSl3Hb47PNHyYKxxNWgtr2HZb H1QVLIOVWSyxAN3zW9cUrYXnJmpVOho= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-540-lCL9fJ85NXKPdLiy9lTs7g-1; Mon, 21 Feb 2022 11:22:51 -0500 X-MC-Unique: lCL9fJ85NXKPdLiy9lTs7g-1 
Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id EC60F1006AA0; Mon, 21 Feb 2022 16:22:49 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 8F4F678AA5; Mon, 21 Feb 2022 16:22:49 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 11/25] KVM: x86/mmu: remove kvm_calc_shadow_root_page_role_common Date: Mon, 21 Feb 2022 11:22:29 -0500 Message-Id: <20220221162243.683208-12-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" kvm_calc_shadow_root_page_role_common is the same as kvm_calc_cpu_mode except for the level, which is overwritten afterwards in kvm_calc_shadow_mmu_root_page_role and kvm_calc_shadow_npt_root_page_role. role.base.direct is already set correctly for the CPU mode, and CR0.PG=3D1 is required for VMRUN so it will also be correct for nested NPT. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3ffa6f2bf991..31874fad12fb 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4796,27 +4796,11 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, reset_tdp_shadow_zero_bits_mask(context); } =20 -static union kvm_mmu_role -kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) -{ - union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs); - - role.base.smep_andnot_wp =3D role.ext.cr4_smep && !____is_cr0_wp(regs); - role.base.smap_andnot_wp =3D role.ext.cr4_smap && !____is_cr0_wp(regs); - role.base.has_4_byte_gpte =3D ____is_cr0_pg(regs) && !____is_cr4_pae(regs= ); - - return role; -} - static union kvm_mmu_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { - union kvm_mmu_role role =3D - kvm_calc_shadow_root_page_role_common(vcpu, regs); - - role.base.direct =3D !____is_cr0_pg(regs); + union kvm_mmu_role role =3D kvm_calc_cpu_mode(vcpu, regs); =20 if (!____is_efer_lma(regs)) role.base.level =3D PT32E_ROOT_LEVEL; @@ -4869,9 +4853,8 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *v= cpu, const struct kvm_mmu_role_regs *regs) { union kvm_mmu_role role =3D - kvm_calc_shadow_root_page_role_common(vcpu, regs); + kvm_calc_cpu_mode(vcpu, regs); =20 - role.base.direct =3D false; role.base.level =3D kvm_mmu_get_tdp_level(vcpu); =20 return role; --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B9FA8C433FE for ; Mon, 21 Feb 2022 16:23:53 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380413AbiBUQYN (ORCPT ); Mon, 21 Feb 2022 11:24:13 -0500 Received: from 
mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53546 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380234AbiBUQXU (ORCPT ); Mon, 21 Feb 2022 11:23:20 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 9370B27152 for ; Mon, 21 Feb 2022 08:22:56 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460575; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=1zP8NjyTETUCXArNph0FHuCwd64/z+cH8qWwcxs072Y=; b=aNeozQY+YrV0Z5cD8z27roFTOWwJif9h3jDoJZ5/xAR1KnnGXXAJ1YJp8MA3Wd3ziSrdOR qtnL0sUnPktIbMzVbUJUAdRPrNHQ5WFD3d4oilzMX9WhdxtCaKXJOXBE1KZrgyUCghryKt NNPjPgOSnAx/Hm5QZ5H4mFLVcEUdWBI= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-232-vc9mpemLMA64yjS6nMNb0Q-1; Mon, 21 Feb 2022 11:22:51 -0500 X-MC-Unique: vc9mpemLMA64yjS6nMNb0Q-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 6FEAD801B0B; Mon, 21 Feb 2022 16:22:50 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 1305478AB5; Mon, 21 Feb 2022 16:22:50 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 12/25] KVM: x86/mmu: cleanup computation of MMU roles for two-dimensional paging Date: Mon, 21 Feb 2022 11:22:30 -0500 Message-Id: <20220221162243.683208-13-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Inline kvm_calc_mmu_role_common into its sole caller, and simplify it by removing the computation of unnecessary bits. Extended bits are unnecessary because page walking uses the CPU mode, and EFER.NX/CR0.WP can be set to one unconditionally---matching the format of shadow pages rather than the format of guest pages. The MMU role for two dimensional paging does still depend on the CPU mode, even if only barely so, due to SMM and guest mode; for consistency, pass it down to kvm_calc_tdp_mmu_root_page_role instead of querying the vcpu with is_smm or is_guest_mode. 
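As a summary sketch (not a hunk from this patch), the resulting TDP root role takes almost nothing from the CPU mode:

	/*
	 * fixed for TDP:      access = ACC_ALL, cr0_wp = efer_nx = direct = true,
	 *                     has_4_byte_gpte = false,
	 *                     level = kvm_mmu_get_tdp_level(vcpu),
	 *                     ad_disabled = (shadow_accessed_mask == 0)
	 * from the CPU mode:  smm, guest_mode
	 */

so two vCPU states that differ only in CR0/CR4/EFER end up with the same TDP root role, while SMM and nested guest mode still select distinct roots.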
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 41 +++++++++-------------------------------- 1 file changed, 9 insertions(+), 32 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 31874fad12fb..0a08ab8e2e4e 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4706,34 +4706,6 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struc= t kvm_mmu_role_regs *regs) return role; } =20 -static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) -{ - union kvm_mmu_role role =3D {0}; - - role.base.access =3D ACC_ALL; - if (____is_cr0_pg(regs)) { - role.ext.cr0_pg =3D 1; - role.base.efer_nx =3D ____is_efer_nx(regs); - role.base.cr0_wp =3D ____is_cr0_wp(regs); - - role.ext.cr4_pae =3D ____is_cr4_pae(regs); - role.ext.cr4_smep =3D ____is_cr4_smep(regs); - role.ext.cr4_smap =3D ____is_cr4_smap(regs); - role.ext.cr4_pse =3D ____is_cr4_pse(regs); - - /* PKEY and LA57 are active iff long mode is active. */ - role.ext.cr4_pke =3D ____is_efer_lma(regs) && ____is_cr4_pke(regs); - role.ext.cr4_la57 =3D ____is_efer_lma(regs) && ____is_cr4_la57(regs); - role.ext.efer_lma =3D ____is_efer_lma(regs); - } - role.base.smm =3D is_smm(vcpu); - role.base.guest_mode =3D is_guest_mode(vcpu); - role.ext.valid =3D 1; - - return role; -} - static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) { /* tdp_root_level is architecture forced level, use it if nonzero */ @@ -4749,14 +4721,20 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_= vcpu *vcpu) =20 static union kvm_mmu_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) + union kvm_mmu_role cpu_mode) { - union kvm_mmu_role role =3D kvm_calc_mmu_role_common(vcpu, regs); + union kvm_mmu_role role =3D {0}; =20 + role.base.access =3D ACC_ALL; + role.base.cr0_wp =3D true; + role.base.efer_nx =3D true; + role.base.smm =3D cpu_mode.base.smm; + role.base.guest_mode =3D cpu_mode.base.guest_mode; role.base.ad_disabled =3D (shadow_accessed_mask =3D=3D 0); role.base.level =3D kvm_mmu_get_tdp_level(vcpu); role.base.direct =3D true; role.base.has_4_byte_gpte =3D false; + role.ext.valid =3D true; =20 return role; } @@ -4766,8 +4744,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); - union kvm_mmu_role mmu_role =3D - kvm_calc_tdp_mmu_root_page_role(vcpu, regs); + union kvm_mmu_role mmu_role =3D kvm_calc_tdp_mmu_root_page_role(vcpu, cpu= _mode); =20 if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && mmu_role.as_u64 =3D=3D context->mmu_role.as_u64) --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5736BC433F5 for ; Mon, 21 Feb 2022 16:24:00 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380420AbiBUQYU (ORCPT ); Mon, 21 Feb 2022 11:24:20 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53520 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380231AbiBUQXT (ORCPT ); Mon, 21 Feb 2022 11:23:19 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 47F2027154 for ; Mon, 21 Feb 
2022 08:22:56 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460575; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=Uy3Pyc+4AlM9oZHo3jTP9USfqWRT/Egoz0d9uTiykZ4=; b=Qn0GEFjOS64vKWzWerG5m85gjs+ndLokRLwEtmb0WiLolno2wzvIpbbre3qkqk9ea/+WQm HbxbX9gd/8R32zHhXCQok8Y0duGzycvtP4/3VH23b3NWCvFLn5EMZO+ZfP6PR4grpg5zw6 LyXOmhXaZQFQEkowTfU9Scru5GaLgQ4= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-60-WGRErQCDNI6jFgr_8R1lRA-1; Mon, 21 Feb 2022 11:22:52 -0500 X-MC-Unique: WGRErQCDNI6jFgr_8R1lRA-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id E72721006AA5; Mon, 21 Feb 2022 16:22:50 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 8A6A778AA5; Mon, 21 Feb 2022 16:22:50 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 13/25] KVM: x86/mmu: cleanup computation of MMU roles for shadow paging Date: Mon, 21 Feb 2022 11:22:31 -0500 Message-Id: <20220221162243.683208-14-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Pass the already-computed CPU mode, instead of redoing it. 
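The resulting call pattern, roughly (sketch, not a hunk):

	union kvm_mmu_role cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
	union kvm_mmu_role mmu_role = kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode);

	shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role);

i.e. the CPU mode is computed once per context initialization and the shadow root role is derived from it by only adjusting the root level.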
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0a08ab8e2e4e..725aef55c08f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4775,13 +4775,11 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, =20 static union kvm_mmu_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) + union kvm_mmu_role role) { - union kvm_mmu_role role =3D kvm_calc_cpu_mode(vcpu, regs); - - if (!____is_efer_lma(regs)) + if (!role.ext.efer_lma) role.base.level =3D PT32E_ROOT_LEVEL; - else if (____is_cr4_la57(regs)) + else if (role.ext.cr4_la57) role.base.level =3D PT64_ROOT_5LEVEL; else role.base.level =3D PT64_ROOT_4LEVEL; @@ -4820,18 +4818,15 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vc= pu, struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); union kvm_mmu_role mmu_role =3D - kvm_calc_shadow_mmu_root_page_role(vcpu, regs); + kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode); =20 shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); } =20 static union kvm_mmu_role kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) + union kvm_mmu_role role) { - union kvm_mmu_role role =3D - kvm_calc_cpu_mode(vcpu, regs); - role.base.level =3D kvm_mmu_get_tdp_level(vcpu); =20 return role; @@ -4847,7 +4842,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u= nsigned long cr0, .efer =3D efer, }; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); - union kvm_mmu_role mmu_role =3D kvm_calc_shadow_npt_root_page_role(vcpu, = ®s);; + union kvm_mmu_role mmu_role =3D kvm_calc_shadow_npt_root_page_role(vcpu, = cpu_mode); =20 shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); kvm_mmu_new_pgd(vcpu, nested_cr3); --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 54367C433F5 for ; Mon, 21 Feb 2022 16:23:56 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380417AbiBUQYS (ORCPT ); Mon, 21 Feb 2022 11:24:18 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53568 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380240AbiBUQXV (ORCPT ); Mon, 21 Feb 2022 11:23:21 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id DFB242716B for ; Mon, 21 Feb 2022 08:22:56 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460576; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=tYb6r12HjD9UpDIda2UW5abpGynZm1DBV0FT29YnZw8=; b=XviavweiSoF8Gw4AFUPqIqQFgTsU6ck41I/FfYeYYR7HJ4L19uCIiItQJXamUaCX1Xwlbx DVGiKbBoE9fzawo01vfeEZJtuxSeyKA/bW9/dBA8ccWWdvbG1RsRNXgexbdfG8nevFWlou MJIbqkumhBqOq36AmxnVpOcBex7h8pg= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, 
cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-101-k8-hEfrVOziRuPUf0IzjnA-1; Mon, 21 Feb 2022 11:22:52 -0500 X-MC-Unique: k8-hEfrVOziRuPUf0IzjnA-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 6B06F801B0C; Mon, 21 Feb 2022 16:22:51 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 0D79E7611B; Mon, 21 Feb 2022 16:22:50 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 14/25] KVM: x86/mmu: store shadow EFER.NX in the MMU role Date: Mon, 21 Feb 2022 11:22:32 -0500 Message-Id: <20220221162243.683208-15-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Now that the MMU role is separate from the CPU mode, it can be a truthful description of the format of the shadow pages. This includes whether the shadow pages use the NX bit; so force the efer_nx field of the MMU role when TDP is disabled, and remove the hardcoding it in the callers of reset_shadow_zero_bits_mask. In fact, the initialization of reserved SPTE bits can now be made common to shadow paging and shadow NPT; move it to shadow_mmu_init_context. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 725aef55c08f..bbb5f50d3dcc 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4405,16 +4405,6 @@ static inline u64 reserved_hpa_bits(void) static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - /* - * KVM uses NX when TDP is disabled to handle a variety of scenarios, - * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and - * to generate correct permissions for CR0.WP=3D0/CR4.SMEP=3D1/EFER.NX=3D= 0. - * The iTLB multi-hit workaround can be toggled at any time, so assume - * NX can be used by any non-nested shadow MMU to avoid having to reset - * MMU contexts. Note, KVM forces EFER.NX=3D1 when TDP is disabled. - */ - bool uses_nx =3D is_efer_nx(context) || !tdp_enabled; - /* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */ bool is_amd =3D true; /* KVM doesn't use 2-level page tables for the shadow MMU. */ @@ -4426,7 +4416,8 @@ static void reset_shadow_zero_bits_mask(struct kvm_vc= pu *vcpu, =20 shadow_zero_check =3D &context->shadow_zero_check; __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), - context->shadow_root_level, uses_nx, + context->shadow_root_level, + context->mmu_role.base.efer_nx, guest_can_use_gbpages(vcpu), is_pse, is_amd); =20 if (!shadow_me_mask) @@ -4784,6 +4775,16 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *= vcpu, else role.base.level =3D PT64_ROOT_4LEVEL; =20 + /* + * KVM forces EFER.NX=3D1 when TDP is disabled, reflect it in the MMU rol= e. 
+ * KVM uses NX when TDP is disabled to handle a variety of scenarios, + * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and + * to generate correct permissions for CR0.WP=3D0/CR4.SMEP=3D1/EFER.NX=3D= 0. + * The iTLB multi-hit workaround can be toggled at any time, so assume + * NX can be used by any non-nested shadow MMU to avoid having to reset + * MMU contexts. + */ + role.base.efer_nx =3D true; return role; } =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 08692C433EF for ; Mon, 21 Feb 2022 16:24:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380449AbiBUQYe (ORCPT ); Mon, 21 Feb 2022 11:24:34 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53544 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380235AbiBUQXU (ORCPT ); Mon, 21 Feb 2022 11:23:20 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id B60D827149 for ; Mon, 21 Feb 2022 08:22:55 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460574; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=IUGtxyA5ludofXybBUQiGOW3vId5hlEWaEIO/yvZvr4=; b=SOJmiM7OPM27xCLkHkHTBLrvDriWvp2bf6qoOJwTzorSNQ4W8bJ66BRddBrYIC0/6V4e11 jLTz1Ep68ZNl/lPXIBMoUUlM90PU47weeALqoXiIjKjCv7fYiEhhF/6Cz8nu/OAhbsvfq6 nCy4IG4tt8NmCK0jknrojaVE14ji2mg= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-75-83_FbTQBNNWR8bS-rEWcKQ-1; Mon, 21 Feb 2022 11:22:53 -0500 X-MC-Unique: 83_FbTQBNNWR8bS-rEWcKQ-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id E3BAC1006AA6; Mon, 21 Feb 2022 16:22:51 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 85BB178AA5; Mon, 21 Feb 2022 16:22:51 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 15/25] KVM: x86/mmu: remove extended bits from mmu_role, rename field Date: Mon, 21 Feb 2022 11:22:33 -0500 Message-Id: <20220221162243.683208-16-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" mmu_role represents the role of the root of the page tables. It does not need any extended bits, as those govern only KVM's page table walking; the is_* functions used for page table walking always use the CPU mode. 
ext.valid is not present anymore in the MMU role, but an all-zero MMU role is impossible because the level field is never zero in the MMU role. So just zap the whole mmu_role in order to force invalidation after CPUID is updated. While making this change, which requires touching almost every occurrence of "mmu_role", rename it to "root_role". Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu/mmu.c | 72 ++++++++++++++++----------------- arch/x86/kvm/mmu/paging_tmpl.h | 4 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 4 files changed, 39 insertions(+), 41 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 996cf9b14f5e..b7d7c4f31730 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -434,7 +434,7 @@ struct kvm_mmu { void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); struct kvm_mmu_root_info root; union kvm_mmu_role cpu_mode; - union kvm_mmu_role mmu_role; + union kvm_mmu_page_role root_role; u8 root_level; u8 shadow_root_level; bool direct_map; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index bbb5f50d3dcc..35907badb6ce 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -193,7 +193,7 @@ struct kvm_mmu_role_regs { =20 /* * Yes, lot's of underscores. They're a hint that you probably shouldn't = be - * reading from the role_regs. Once the mmu_role is constructed, it becom= es + * reading from the role_regs. Once the root_role is constructed, it beco= mes * the single source of truth for the MMU's state. */ #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \ @@ -2029,7 +2029,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct k= vm_vcpu *vcpu, int collisions =3D 0; LIST_HEAD(invalid_list); =20 - role =3D vcpu->arch.mmu->mmu_role.base; + role =3D vcpu->arch.mmu->root_role; role.level =3D level; role.direct =3D direct; role.access =3D access; @@ -3265,7 +3265,7 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, s= truct kvm_mmu *mmu) * This should not be called while L2 is active, L2 can't invalidate * _only_ its own roots, e.g. INVVPID unconditionally exits. */ - WARN_ON_ONCE(mmu->mmu_role.base.guest_mode); + WARN_ON_ONCE(mmu->root_role.guest_mode); =20 for (i =3D 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { root_hpa =3D mmu->prev_roots[i].hpa; @@ -4158,7 +4158,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct k= vm_mmu *mmu, void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) { struct kvm_mmu *mmu =3D vcpu->arch.mmu; - union kvm_mmu_page_role new_role =3D mmu->mmu_role.base; + union kvm_mmu_page_role new_role =3D mmu->root_role; =20 if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) { /* kvm_mmu_ensure_valid_pgd will set up a new root. 
*/ @@ -4417,7 +4417,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vc= pu *vcpu, shadow_zero_check =3D &context->shadow_zero_check; __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), context->shadow_root_level, - context->mmu_role.base.efer_nx, + context->root_role.efer_nx, guest_can_use_gbpages(vcpu), is_pse, is_amd); =20 if (!shadow_me_mask) @@ -4710,22 +4710,21 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_= vcpu *vcpu) return max_tdp_level; } =20 -static union kvm_mmu_role +static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_mmu_role cpu_mode) { - union kvm_mmu_role role =3D {0}; + union kvm_mmu_page_role role =3D {0}; =20 - role.base.access =3D ACC_ALL; - role.base.cr0_wp =3D true; - role.base.efer_nx =3D true; - role.base.smm =3D cpu_mode.base.smm; - role.base.guest_mode =3D cpu_mode.base.guest_mode; - role.base.ad_disabled =3D (shadow_accessed_mask =3D=3D 0); - role.base.level =3D kvm_mmu_get_tdp_level(vcpu); - role.base.direct =3D true; - role.base.has_4_byte_gpte =3D false; - role.ext.valid =3D true; + role.access =3D ACC_ALL; + role.cr0_wp =3D true; + role.efer_nx =3D true; + role.smm =3D cpu_mode.base.smm; + role.guest_mode =3D cpu_mode.base.guest_mode; + role.ad_disabled =3D (shadow_accessed_mask =3D=3D 0); + role.level =3D kvm_mmu_get_tdp_level(vcpu); + role.direct =3D true; + role.has_4_byte_gpte =3D false; =20 return role; } @@ -4735,14 +4734,14 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); - union kvm_mmu_role mmu_role =3D kvm_calc_tdp_mmu_root_page_role(vcpu, cpu= _mode); + union kvm_mmu_page_role root_role =3D kvm_calc_tdp_mmu_root_page_role(vcp= u, cpu_mode); =20 if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && - mmu_role.as_u64 =3D=3D context->mmu_role.as_u64) + root_role.word =3D=3D context->root_role.word) return; =20 context->cpu_mode.as_u64 =3D cpu_mode.as_u64; - context->mmu_role.as_u64 =3D mmu_role.as_u64; + context->root_role.word =3D root_role.word; context->page_fault =3D kvm_tdp_page_fault; context->sync_page =3D nonpaging_sync_page; context->invlpg =3D NULL; @@ -4764,7 +4763,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, reset_tdp_shadow_zero_bits_mask(context); } =20 -static union kvm_mmu_role +static union kvm_mmu_page_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_mmu_role role) { @@ -4785,19 +4784,19 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu = *vcpu, * MMU contexts. 
*/ role.base.efer_nx =3D true; - return role; + return role.base; } =20 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, union kvm_mmu_role cpu_mode, - union kvm_mmu_role mmu_role) + union kvm_mmu_page_role root_role) { if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && - mmu_role.as_u64 =3D=3D context->mmu_role.as_u64) + root_role.word =3D=3D context->root_role.word) return; =20 context->cpu_mode.as_u64 =3D cpu_mode.as_u64; - context->mmu_role.as_u64 =3D mmu_role.as_u64; + context->root_role.word =3D root_role.word; =20 if (!is_cr0_pg(context)) nonpaging_init_context(context); @@ -4808,7 +4807,7 @@ static void shadow_mmu_init_context(struct kvm_vcpu *= vcpu, struct kvm_mmu *conte context->root_level =3D cpu_mode.base.level; =20 reset_guest_paging_metadata(vcpu, context); - context->shadow_root_level =3D mmu_role.base.level; + context->shadow_root_level =3D root_role.level; =20 reset_shadow_zero_bits_mask(vcpu, context); } @@ -4818,19 +4817,18 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vc= pu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); - union kvm_mmu_role mmu_role =3D + union kvm_mmu_page_role root_role =3D kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode); =20 - shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); + shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); } =20 -static union kvm_mmu_role +static union kvm_mmu_page_role kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu, union kvm_mmu_role role) { role.base.level =3D kvm_mmu_get_tdp_level(vcpu); - - return role; + return role.base; } =20 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, @@ -4843,9 +4841,9 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u= nsigned long cr0, .efer =3D efer, }; union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); - union kvm_mmu_role mmu_role =3D kvm_calc_shadow_npt_root_page_role(vcpu, = cpu_mode); + union kvm_mmu_page_role root_role =3D kvm_calc_shadow_npt_root_page_role(= vcpu, cpu_mode); =20 - shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role); + shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); kvm_mmu_new_pgd(vcpu, nested_cr3); } EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); @@ -4888,7 +4886,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, if (new_mode.as_u64 !=3D context->cpu_mode.as_u64) { /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */ context->cpu_mode.as_u64 =3D new_mode.as_u64; - context->mmu_role.as_u64 =3D new_mode.as_u64; + context->root_role.word =3D new_mode.base.word; =20 context->shadow_root_level =3D level; =20 @@ -4987,9 +4985,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) * problem is swept under the rug; KVM's CPUID API is horrific and * it's all but impossible to solve it without introducing a new API. 
*/ - vcpu->arch.root_mmu.mmu_role.ext.valid =3D 0; - vcpu->arch.guest_mmu.mmu_role.ext.valid =3D 0; - vcpu->arch.nested_mmu.mmu_role.ext.valid =3D 0; + vcpu->arch.root_mmu.root_role.word =3D 0; + vcpu->arch.guest_mmu.root_role.word =3D 0; + vcpu->arch.nested_mmu.root_role.word =3D 0; vcpu->arch.root_mmu.cpu_mode.ext.valid =3D 0; vcpu->arch.guest_mmu.cpu_mode.ext.valid =3D 0; vcpu->arch.nested_mmu.cpu_mode.ext.valid =3D 0; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 64b6f76641f0..7c0fa115bd56 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1023,7 +1023,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu,= struct kvm_mmu *mmu, */ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { - union kvm_mmu_page_role mmu_role =3D vcpu->arch.mmu->mmu_role.base; + union kvm_mmu_page_role root_role =3D vcpu->arch.mmu->root_role; int i; bool host_writable; gpa_t first_pte_gpa; @@ -1051,7 +1051,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, st= ruct kvm_mmu_page *sp) * reserved bits checks will be wrong, etc... */ if (WARN_ON_ONCE(sp->role.direct || - (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word)) + (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) return -1; =20 first_pte_gpa =3D FNAME(get_level1_sp_gpa)(sp); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index debf08212f12..c18ad86c9a82 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -209,7 +209,7 @@ static void tdp_mmu_init_child_sp(struct kvm_mmu_page *= child_sp, =20 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) { - union kvm_mmu_page_role role =3D vcpu->arch.mmu->mmu_role.base; + union kvm_mmu_page_role role =3D vcpu->arch.mmu->root_role; struct kvm *kvm =3D vcpu->kvm; struct kvm_mmu_page *root; =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id EE841C433FE for ; Mon, 21 Feb 2022 16:24:30 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380401AbiBUQYv (ORCPT ); Mon, 21 Feb 2022 11:24:51 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53618 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380208AbiBUQXW (ORCPT ); Mon, 21 Feb 2022 11:23:22 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id CB2322717F for ; Mon, 21 Feb 2022 08:22:57 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460577; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=tIxBOIs9YqDKt+bVtp2eN+6o6aowPpvnga3h8cz9ah4=; b=ZmjzaGhZ0RbIBEOpaZhyLBrV7x5IDjkbAnq6A521ar2xhI4Bq8R4SPqMO3iY7tjHVuDf6a qJOxw+YY8bQcz44tgIDnWaFQJ+Aj9JHHyicxl2c0XrAQw7wdU3Crnku+ykLcPmQ/n3oMTD UI97BL8URwLCHeZJK+e48WYHjn9Bw84= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-171-7NMPbX9jMCqH-jmTpwH3tw-1; Mon, 21 Feb 2022 11:22:53 
-0500 X-MC-Unique: 7NMPbX9jMCqH-jmTpwH3tw-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 688F41006AA0; Mon, 21 Feb 2022 16:22:52 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 09E9D7611B; Mon, 21 Feb 2022 16:22:51 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 16/25] KVM: x86/mmu: rename kvm_mmu_role union Date: Mon, 21 Feb 2022 11:22:34 -0500 Message-Id: <20220221162243.683208-17-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" It is quite confusing that the "full" union is called kvm_mmu_role but is used for the "cpu_mode" field of struct kvm_mmu. Rename it to kvm_mmu_paging_mode. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 6 +++--- arch/x86/kvm/mmu/mmu.c | 28 ++++++++++++++-------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index b7d7c4f31730..3dbe0be075f5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -276,7 +276,7 @@ struct kvm_kernel_irq_routing_entry; /* * kvm_mmu_page_role tracks the properties of a shadow page (where shadow = page * also includes TDP pages) to determine whether or not a page can be used= in - * the given MMU context. This is a subset of the overall kvm_mmu_role to + * the given MMU context. This is a subset of the overall kvm_mmu_paging_= mode to * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows alloca= ting * 2 bytes per gfn instead of 4 bytes per gfn. 
* @@ -373,7 +373,7 @@ union kvm_mmu_extended_role { }; }; =20 -union kvm_mmu_role { +union kvm_mmu_paging_mode { u64 as_u64; struct { union kvm_mmu_page_role base; @@ -433,7 +433,7 @@ struct kvm_mmu { struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); struct kvm_mmu_root_info root; - union kvm_mmu_role cpu_mode; + union kvm_mmu_paging_mode cpu_mode; union kvm_mmu_page_role root_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 35907badb6ce..61499fd7d017 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4658,10 +4658,10 @@ static void paging32_init_context(struct kvm_mmu *c= ontext) context->direct_map =3D false; } =20 -static union kvm_mmu_role +static union kvm_mmu_paging_mode kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *r= egs) { - union kvm_mmu_role role =3D {0}; + union kvm_mmu_paging_mode role =3D {0}; =20 role.base.access =3D ACC_ALL; role.base.smm =3D is_smm(vcpu); @@ -4712,7 +4712,7 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vc= pu *vcpu) =20 static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, - union kvm_mmu_role cpu_mode) + union kvm_mmu_paging_mode cpu_mode) { union kvm_mmu_page_role role =3D {0}; =20 @@ -4733,7 +4733,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); + union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); union kvm_mmu_page_role root_role =3D kvm_calc_tdp_mmu_root_page_role(vcp= u, cpu_mode); =20 if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && @@ -4765,7 +4765,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, =20 static union kvm_mmu_page_role kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, - union kvm_mmu_role role) + union kvm_mmu_paging_mode role) { if (!role.ext.efer_lma) role.base.level =3D PT32E_ROOT_LEVEL; @@ -4788,7 +4788,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *v= cpu, } =20 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, - union kvm_mmu_role cpu_mode, + union kvm_mmu_paging_mode cpu_mode, union kvm_mmu_page_role root_role) { if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && @@ -4816,7 +4816,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); + union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); union kvm_mmu_page_role root_role =3D kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode); =20 @@ -4825,7 +4825,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, =20 static union kvm_mmu_page_role kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu, - union kvm_mmu_role role) + union kvm_mmu_paging_mode role) { role.base.level =3D kvm_mmu_get_tdp_level(vcpu); return role.base; @@ -4840,7 +4840,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u= nsigned long cr0, .cr4 =3D cr4 & ~X86_CR4_PKE, .efer =3D efer, }; - union kvm_mmu_role cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); + union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); union kvm_mmu_page_role root_role =3D kvm_calc_shadow_npt_root_page_role(= vcpu, cpu_mode); =20 shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); @@ -4848,11 +4848,11 @@ 
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu,= unsigned long cr0, } EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); =20 -static union kvm_mmu_role +static union kvm_mmu_paging_mode kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_di= rty, bool execonly, u8 level) { - union kvm_mmu_role role =3D {0}; + union kvm_mmu_paging_mode role =3D {0}; =20 /* * KVM does not support SMM transfer monitors, and consequently does not @@ -4879,7 +4879,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, { struct kvm_mmu *context =3D &vcpu->arch.guest_mmu; u8 level =3D vmx_eptp_page_walk_level(new_eptp); - union kvm_mmu_role new_mode =3D + union kvm_mmu_paging_mode new_mode =3D kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, execonly, level); =20 @@ -4920,7 +4920,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu, =20 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mm= u_role_regs *regs) { - union kvm_mmu_role new_mode =3D kvm_calc_cpu_mode(vcpu, regs); + union kvm_mmu_paging_mode new_mode =3D kvm_calc_cpu_mode(vcpu, regs); struct kvm_mmu *g_context =3D &vcpu->arch.nested_mmu; =20 if (new_mode.as_u64 =3D=3D g_context->cpu_mode.as_u64) @@ -6116,7 +6116,7 @@ int kvm_mmu_module_init(void) */ BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) !=3D sizeof(u32)); BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) !=3D sizeof(u32)); - BUILD_BUG_ON(sizeof(union kvm_mmu_role) !=3D sizeof(u64)); + BUILD_BUG_ON(sizeof(union kvm_mmu_paging_mode) !=3D sizeof(u64)); =20 kvm_mmu_reset_all_pte_masks(); =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 10345C433F5 for ; Mon, 21 Feb 2022 16:25:07 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380477AbiBUQZ1 (ORCPT ); Mon, 21 Feb 2022 11:25:27 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53602 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380247AbiBUQXV (ORCPT ); Mon, 21 Feb 2022 11:23:21 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 3BA63275D1 for ; Mon, 21 Feb 2022 08:22:58 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460577; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=i3nje5ezz2l8EdII+5hi4MIxNm8UaLF4TH8gVbuFagw=; b=V4cpdK47fRVuVUnmKhfekpv6Kon+2rNkU9sKacaK2xMX8tRShCt/oY4aqlapQErHNM86xQ TVEN/Sv+5ciD/k9+AtJxNfqm/Bl3har1iyKiadbEC5htpahcp92l3ayJCWO7KtlBjFAk/K scNomuG3zl4Dh/N2PH7b5ZN2tc+2QVk= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-590--lw84rjrO9OLbYqALxnYAg-1; Mon, 21 Feb 2022 11:22:53 -0500 X-MC-Unique: -lw84rjrO9OLbYqALxnYAg-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 
E0EB11091DA1; Mon, 21 Feb 2022 16:22:52 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 826E37611B; Mon, 21 Feb 2022 16:22:52 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 17/25] KVM: x86/mmu: remove redundant bits from extended role Date: Mon, 21 Feb 2022 11:22:35 -0500 Message-Id: <20220221162243.683208-18-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Before the separation of the CPU and the MMU role, CR0.PG was not available in the base MMU role, because two-dimensional paging always used direct=3D1 in the MMU role. However, now that the raw role is snapshotted in mmu->cpu_mode, CR0.PG *can* be found (though inverted) as !cpu_mode.base.direct. There is no need to store it again in union kvm_mmu_extended_role; instead, write an is_cr0_pg accessor by hand that takes care of the inversion. Likewise, CR4.PAE is now always present in the CPU mode as !cpu_mode.base.has_4_byte_gpte. The inversion makes certain tests on the MMU role easier, and is easily hidden by the is_cr4_pae accessor when operating on the CPU mode. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 -- arch/x86/kvm/mmu/mmu.c | 14 ++++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 3dbe0be075f5..14f391582738 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -362,8 +362,6 @@ union kvm_mmu_extended_role { struct { unsigned int valid:1; unsigned int execonly:1; - unsigned int cr0_pg:1; - unsigned int cr4_pae:1; unsigned int cr4_pse:1; unsigned int cr4_pke:1; unsigned int cr4_smap:1; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 61499fd7d017..eb7c62c11700 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -223,16 +223,24 @@ static inline bool __maybe_unused is_##reg##_##name(s= truct kvm_mmu *mmu) \ { \ return !!(mmu->cpu_mode. base_or_ext . 
reg##_##name); \ } -BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg); BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse); -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pae); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke); BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57); BUILD_MMU_ROLE_ACCESSOR(base, efer, nx); =20 +static inline bool is_cr0_pg(struct kvm_mmu *mmu) +{ + return !mmu->cpu_mode.base.direct; +} + +static inline bool is_cr4_pae(struct kvm_mmu *mmu) +{ + return !mmu->cpu_mode.base.has_4_byte_gpte; +} + static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu) { struct kvm_mmu_role_regs regs =3D { @@ -4681,8 +4689,6 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct= kvm_mmu_role_regs *regs) else role.base.level =3D PT32_ROOT_LEVEL; =20 - role.ext.cr0_pg =3D 1; - role.ext.cr4_pae =3D ____is_cr4_pae(regs); role.ext.cr4_smep =3D ____is_cr4_smep(regs); role.ext.cr4_smap =3D ____is_cr4_smap(regs); role.ext.cr4_pse =3D ____is_cr4_pse(regs); --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1BDB0C433F5 for ; Mon, 21 Feb 2022 16:24:07 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380431AbiBUQY1 (ORCPT ); Mon, 21 Feb 2022 11:24:27 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53568 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380245AbiBUQXV (ORCPT ); Mon, 21 Feb 2022 11:23:21 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id F159427170 for ; Mon, 21 Feb 2022 08:22:56 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460576; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=wHGN6zxyDVkn7jr8uMe3K7F2sy2WSgPe2xYpHBh23mU=; b=geCqrMBKYy+GEIRCHkleDCC2KR2yxIOR0pjbrhFZSgCIUBrxRs3MnVrmzRhhUVSXmA2e4r TmuMFGghht4A0IDnRItTdI/P5S3kg2AfnHiytgcPGX1cfRq9Rw/dVNbwwj6LCl2m6eWo92 jOnEkBtCbVC3IBwCx5jv6bv8CHz2tek= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-364-6jTrldkjMgWmgswhnEZgEw-1; Mon, 21 Feb 2022 11:22:54 -0500 X-MC-Unique: 6jTrldkjMgWmgswhnEZgEw-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 6504181424C; Mon, 21 Feb 2022 16:22:53 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 076C578AA5; Mon, 21 Feb 2022 16:22:52 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 18/25] KVM: x86/mmu: remove valid from extended role Date: Mon, 21 Feb 2022 11:22:36 -0500 Message-Id: 
<20220221162243.683208-19-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" The level and direct field of the CPU mode can act as a marker for validity instead: exactly one of them is guaranteed to be nonzero, so a zero value for both means that the role is invalid and the MMU properties will be computed again. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 +--- arch/x86/kvm/mmu/mmu.c | 8 +++----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 14f391582738..b8b115b2038f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -342,8 +342,7 @@ union kvm_mmu_page_role { * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties * relevant to the current MMU configuration. When loading CR0, CR4, or = EFER, * including on nested transitions, if nothing in the full role changes th= en - * MMU re-configuration can be skipped. @valid bit is set on first usage s= o we - * don't treat all-zero structure as valid data. + * MMU re-configuration can be skipped. * * The properties that are tracked in the extended role but not the page r= ole * are for things that either (a) do not affect the validity of the shadow= page @@ -360,7 +359,6 @@ union kvm_mmu_page_role { union kvm_mmu_extended_role { u32 word; struct { - unsigned int valid:1; unsigned int execonly:1; unsigned int cr4_pse:1; unsigned int cr4_pke:1; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index eb7c62c11700..d657e2e2ceec 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4699,7 +4699,6 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct= kvm_mmu_role_regs *regs) role.ext.efer_lma =3D ____is_efer_lma(regs); } =20 - role.ext.valid =3D 1; return role; } =20 @@ -4874,7 +4873,6 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *v= cpu, bool accessed_dirty, =20 role.ext.word =3D 0; role.ext.execonly =3D execonly; - role.ext.valid =3D 1; =20 return role; } @@ -4994,9 +4992,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.root_mmu.root_role.word =3D 0; vcpu->arch.guest_mmu.root_role.word =3D 0; vcpu->arch.nested_mmu.root_role.word =3D 0; - vcpu->arch.root_mmu.cpu_mode.ext.valid =3D 0; - vcpu->arch.guest_mmu.cpu_mode.ext.valid =3D 0; - vcpu->arch.nested_mmu.cpu_mode.ext.valid =3D 0; + vcpu->arch.root_mmu.cpu_mode.as_u64 =3D 0; + vcpu->arch.guest_mmu.cpu_mode.as_u64 =3D 0; + vcpu->arch.nested_mmu.cpu_mode.as_u64 =3D 0; kvm_mmu_reset_context(vcpu); =20 /* --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4CC9DC433F5 for ; Mon, 21 Feb 2022 16:24:49 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380450AbiBUQZK (ORCPT ); Mon, 21 Feb 2022 11:25:10 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53568 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380255AbiBUQXW (ORCPT ); Mon, 21 Feb 2022 11:23:22 -0500 
Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 9B62E27149 for ; Mon, 21 Feb 2022 08:22:59 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460578; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=Ew2fA4AqH9vKgQPNp0OjXmbIreACjwPwyBewMu02duQ=; b=adjyySv/L/T32JTC43dIzEqzZGYTFDDk9cMBG8TVh2TCq2LfktcUzl1ezvndmH9wLBT56L supbtF76gwph5WzTomrQAPv+RDeEHzpi/4KL6Z2i3iWNGQOGLdHs5lj1cC/r1lp8IUncFX 36UM8rKRq30hwwCMH37znf2fbZjaBcc= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-59-DqE0oH5vOpqHnLkcBe7Y_w-1; Mon, 21 Feb 2022 11:22:55 -0500 X-MC-Unique: DqE0oH5vOpqHnLkcBe7Y_w-1 Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.14]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id DE7301926DA2; Mon, 21 Feb 2022 16:22:53 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 7FB347611B; Mon, 21 Feb 2022 16:22:53 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 19/25] KVM: x86/mmu: simplify and/or inline computation of shadow MMU roles Date: Mon, 21 Feb 2022 11:22:37 -0500 Message-Id: <20220221162243.683208-20-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Shadow MMUs compute their role from cpu_mode.base, simply by adjusting the root level. It's one line of code, so do not place it in a separate function. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 54 +++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 35 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d657e2e2ceec..47288643ab70 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4768,30 +4768,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, reset_tdp_shadow_zero_bits_mask(context); } =20 -static union kvm_mmu_page_role -kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, - union kvm_mmu_paging_mode role) -{ - if (!role.ext.efer_lma) - role.base.level =3D PT32E_ROOT_LEVEL; - else if (role.ext.cr4_la57) - role.base.level =3D PT64_ROOT_5LEVEL; - else - role.base.level =3D PT64_ROOT_4LEVEL; - - /* - * KVM forces EFER.NX=3D1 when TDP is disabled, reflect it in the MMU rol= e. - * KVM uses NX when TDP is disabled to handle a variety of scenarios, - * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and - * to generate correct permissions for CR0.WP=3D0/CR4.SMEP=3D1/EFER.NX=3D= 0. 
- * The iTLB multi-hit workaround can be toggled at any time, so assume - * NX can be used by any non-nested shadow MMU to avoid having to reset - * MMU contexts. - */ - role.base.efer_nx =3D true; - return role.base; -} - static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, union kvm_mmu_paging_mode cpu_mode, union kvm_mmu_page_role root_role) @@ -4822,18 +4798,23 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vc= pu, { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); - union kvm_mmu_page_role root_role =3D - kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode); + union kvm_mmu_page_role root_role; =20 - shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); -} + root_role =3D cpu_mode.base; + root_role.level =3D max_t(u32, root_role.level, PT32E_ROOT_LEVEL); =20 -static union kvm_mmu_page_role -kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu, - union kvm_mmu_paging_mode role) -{ - role.base.level =3D kvm_mmu_get_tdp_level(vcpu); - return role.base; + /* + * KVM forces EFER.NX=3D1 when TDP is disabled, reflect it in the MMU rol= e. + * KVM uses NX when TDP is disabled to handle a variety of scenarios, + * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and + * to generate correct permissions for CR0.WP=3D0/CR4.SMEP=3D1/EFER.NX=3D= 0. + * The iTLB multi-hit workaround can be toggled at any time, so assume + * NX can be used by any non-nested shadow MMU to avoid having to reset + * MMU contexts. + */ + root_role.efer_nx =3D true; + + shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); } =20 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, @@ -4846,7 +4827,10 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, = unsigned long cr0, .efer =3D efer, }; union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); - union kvm_mmu_page_role root_role =3D kvm_calc_shadow_npt_root_page_role(= vcpu, cpu_mode); + union kvm_mmu_page_role root_role; + + root_role =3D cpu_mode.base; + root_role.level =3D kvm_mmu_get_tdp_level(vcpu); =20 shadow_mmu_init_context(vcpu, context, cpu_mode, root_role); kvm_mmu_new_pgd(vcpu, nested_cr3); --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C7F56C433F5 for ; Mon, 21 Feb 2022 16:24:33 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380347AbiBUQYz (ORCPT ); Mon, 21 Feb 2022 11:24:55 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53592 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380261AbiBUQXY (ORCPT ); Mon, 21 Feb 2022 11:23:24 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 5702327154 for ; Mon, 21 Feb 2022 08:23:00 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460579; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=ZC/3vO6f2ZKAzs9BdN9A8dm2vBnERQMJFs75uTXpSKk=; 
b=TWrDlZe7FePybEGVHFI1sUAb4re0PH+ybNkUl+omwCnQIAfvn/JpbZfjgKj0I1w2kpN+7s 26IM8ihVe9R2pL0TLveaqpJAWnOFn5JnVR346PEbpV42sfGDlAhHk7iTRBGeI7jJISfcel B+bU7RSSmh9sv2dGKj4qyYdDvScXmKc= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-353-16XvRrf_M7i11pCLtcOs7g-1; Mon, 21 Feb 2022 11:22:55 -0500 X-MC-Unique: 16XvRrf_M7i11pCLtcOs7g-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id AE5941091DA4; Mon, 21 Feb 2022 16:22:54 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 4FE5484A1C; Mon, 21 Feb 2022 16:22:54 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 20/25] KVM: x86/mmu: pull CPU mode computation to kvm_init_mmu Date: Mon, 21 Feb 2022 11:22:38 -0500 Message-Id: <20220221162243.683208-21-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Do not lead init_kvm_*mmu into the temptation of poking into struct kvm_mmu_role_regs, by passing to it directly the CPU mode. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 47288643ab70..a7028c2ae5c7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4734,11 +4734,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcp= u, return role; } =20 -static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_paging_m= ode cpu_mode) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); union kvm_mmu_page_role root_role =3D kvm_calc_tdp_mmu_root_page_role(vcp= u, cpu_mode); =20 if (cpu_mode.as_u64 =3D=3D context->cpu_mode.as_u64 && @@ -4794,10 +4792,9 @@ static void shadow_mmu_init_context(struct kvm_vcpu = *vcpu, struct kvm_mmu *conte } =20 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) + union kvm_mmu_paging_mode cpu_mode) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, regs); union kvm_mmu_page_role root_role; =20 root_role =3D cpu_mode.base; @@ -4895,20 +4892,19 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu,= bool execonly, EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); =20 static void init_kvm_softmmu(struct kvm_vcpu *vcpu, - const struct kvm_mmu_role_regs *regs) + union kvm_mmu_paging_mode cpu_mode) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; =20 - kvm_init_shadow_mmu(vcpu, regs); + kvm_init_shadow_mmu(vcpu, cpu_mode); =20 context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D 
kvm_inject_page_fault_shadow; } =20 -static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mm= u_role_regs *regs) +static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_pagin= g_mode new_mode) { - union kvm_mmu_paging_mode new_mode =3D kvm_calc_cpu_mode(vcpu, regs); struct kvm_mmu *g_context =3D &vcpu->arch.nested_mmu; =20 if (new_mode.as_u64 =3D=3D g_context->cpu_mode.as_u64) @@ -4949,13 +4945,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vc= pu, const struct kvm_mmu_role void kvm_init_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu_role_regs regs =3D vcpu_to_role_regs(vcpu); + union kvm_mmu_paging_mode cpu_mode =3D kvm_calc_cpu_mode(vcpu, ®s); =20 if (mmu_is_nested(vcpu)) - init_kvm_nested_mmu(vcpu, ®s); + init_kvm_nested_mmu(vcpu, cpu_mode); else if (tdp_enabled) - init_kvm_tdp_mmu(vcpu, ®s); + init_kvm_tdp_mmu(vcpu, cpu_mode); else - init_kvm_softmmu(vcpu, ®s); + init_kvm_softmmu(vcpu, cpu_mode); } EXPORT_SYMBOL_GPL(kvm_init_mmu); =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1D61AC433F5 for ; Mon, 21 Feb 2022 16:25:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380464AbiBUQZV (ORCPT ); Mon, 21 Feb 2022 11:25:21 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53620 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380251AbiBUQXW (ORCPT ); Mon, 21 Feb 2022 11:23:22 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.129.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 99A7B2714D for ; Mon, 21 Feb 2022 08:22:58 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460577; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=R0A3zguA6WQgIcTBIC07MyIgv+7xo6imyD0/mQM+TIw=; b=JT1DtioQ8aFbsLP41VQFNmgLBTrDUQ8rAJ6hfwrX7Ad15saVzo+QW1bVzgmDdM5lWMvbju GRG7ZBBkB2JCb5WwubTelbz4OrzyIX+vcJBKD2HEe7TkApY4nt6weOKpEHX3M96FxF2JM5 Z5UXeobHHHg8ORZbVPilB36pk5boBSE= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-403-QrveasqgO3iegvZSII8QFw-1; Mon, 21 Feb 2022 11:22:56 -0500 X-MC-Unique: QrveasqgO3iegvZSII8QFw-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 4ED9B801B0B; Mon, 21 Feb 2022 16:22:55 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id C8D4B84A0E; Mon, 21 Feb 2022 16:22:54 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 21/25] KVM: x86/mmu: replace shadow_root_level with root_role.level Date: Mon, 21 Feb 2022 11:22:39 -0500 Message-Id: <20220221162243.683208-22-pbonzini@redhat.com> In-Reply-To: 
<20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" root_role.level is always the same value as shadow_level: - it's kvm_mmu_get_tdp_level(vcpu) when going through init_kvm_tdp_mmu - it's the level argument when going through kvm_init_shadow_ept_mmu - it's assigned directly from new_role.base.level when going through shadow_mmu_init_context Remove the duplication and get the level directly from the role. Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu.h | 2 +- arch/x86/kvm/mmu/mmu.c | 33 ++++++++++++++------------------- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- 6 files changed, 18 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index b8b115b2038f..81897aa4e669 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -432,7 +432,6 @@ struct kvm_mmu { union kvm_mmu_paging_mode cpu_mode; union kvm_mmu_page_role root_role; u8 root_level; - u8 shadow_root_level; bool direct_map; struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS]; =20 diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 6ee4436e46f1..596ac2d424fc 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -113,7 +113,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vc= pu) return; =20 static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa, - vcpu->arch.mmu->shadow_root_level); + vcpu->arch.mmu->root_role.level); } =20 extern unsigned long kvm_get_guest_cr3(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index a7028c2ae5c7..c33879f23e94 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2127,7 +2127,7 @@ static void shadow_walk_init_using_root(struct kvm_sh= adow_walk_iterator *iterato { iterator->addr =3D addr; iterator->shadow_addr =3D root; - iterator->level =3D vcpu->arch.mmu->shadow_root_level; + iterator->level =3D vcpu->arch.mmu->root_role.level; =20 if (iterator->level >=3D PT64_ROOT_4LEVEL && vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && @@ -3316,7 +3316,7 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gf= n_t gfn, gva_t gva, static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu =3D vcpu->arch.mmu; - u8 shadow_root_level =3D mmu->shadow_root_level; + u8 shadow_root_level =3D mmu->root_role.level; hpa_t root; unsigned i; int r; @@ -3466,7 +3466,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) */ if (mmu->root_level >=3D PT64_ROOT_4LEVEL) { root =3D mmu_alloc_root(vcpu, root_gfn, 0, - mmu->shadow_root_level, false); + mmu->root_role.level, false); mmu->root.hpa =3D root; goto set_root_pgd; } @@ -3482,7 +3482,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) * the shadow page table may be a PAE or a long mode page table. 
*/ pm_mask =3D PT_PRESENT_MASK | shadow_me_mask; - if (mmu->shadow_root_level >=3D PT64_ROOT_4LEVEL) { + if (mmu->root_role.level >=3D PT64_ROOT_4LEVEL) { pm_mask |=3D PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; =20 if (WARN_ON_ONCE(!mmu->pml4_root)) { @@ -3491,7 +3491,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) } mmu->pml4_root[0] =3D __pa(mmu->pae_root) | pm_mask; =20 - if (mmu->shadow_root_level =3D=3D PT64_ROOT_5LEVEL) { + if (mmu->root_role.level =3D=3D PT64_ROOT_5LEVEL) { if (WARN_ON_ONCE(!mmu->pml5_root)) { r =3D -EIO; goto out_unlock; @@ -3516,9 +3516,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) mmu->pae_root[i] =3D root | pm_mask; } =20 - if (mmu->shadow_root_level =3D=3D PT64_ROOT_5LEVEL) + if (mmu->root_role.level =3D=3D PT64_ROOT_5LEVEL) mmu->root.hpa =3D __pa(mmu->pml5_root); - else if (mmu->shadow_root_level =3D=3D PT64_ROOT_4LEVEL) + else if (mmu->root_role.level =3D=3D PT64_ROOT_4LEVEL) mmu->root.hpa =3D __pa(mmu->pml4_root); else mmu->root.hpa =3D __pa(mmu->pae_root); @@ -3534,7 +3534,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu =3D vcpu->arch.mmu; - bool need_pml5 =3D mmu->shadow_root_level > PT64_ROOT_4LEVEL; + bool need_pml5 =3D mmu->root_role.level > PT64_ROOT_4LEVEL; u64 *pml5_root =3D NULL; u64 *pml4_root =3D NULL; u64 *pae_root; @@ -3546,7 +3546,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *v= cpu) * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. */ if (mmu->direct_map || mmu->root_level >=3D PT64_ROOT_4LEVEL || - mmu->shadow_root_level < PT64_ROOT_4LEVEL) + mmu->root_role.level < PT64_ROOT_4LEVEL) return 0; =20 /* @@ -4420,18 +4420,18 @@ static void reset_shadow_zero_bits_mask(struct kvm_= vcpu *vcpu, struct rsvd_bits_validate *shadow_zero_check; int i; =20 - WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL); + WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL); =20 shadow_zero_check =3D &context->shadow_zero_check; __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), - context->shadow_root_level, + context->root_role.level, context->root_role.efer_nx, guest_can_use_gbpages(vcpu), is_pse, is_amd); =20 if (!shadow_me_mask) return; =20 - for (i =3D context->shadow_root_level; --i >=3D 0;) { + for (i =3D context->root_role.level; --i >=3D 0;) { shadow_zero_check->rsvd_bits_mask[0][i] &=3D ~shadow_me_mask; shadow_zero_check->rsvd_bits_mask[1][i] &=3D ~shadow_me_mask; } @@ -4458,7 +4458,7 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *conte= xt) =20 if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), - context->shadow_root_level, false, + context->root_role.level, false, boot_cpu_has(X86_FEATURE_GBPAGES), false, true); else @@ -4469,7 +4469,7 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *conte= xt) if (!shadow_me_mask) return; =20 - for (i =3D context->shadow_root_level; --i >=3D 0;) { + for (i =3D context->root_role.level; --i >=3D 0;) { shadow_zero_check->rsvd_bits_mask[0][i] &=3D ~shadow_me_mask; shadow_zero_check->rsvd_bits_mask[1][i] &=3D ~shadow_me_mask; } @@ -4748,7 +4748,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, u= nion kvm_mmu_paging_mode cp context->page_fault =3D kvm_tdp_page_fault; context->sync_page =3D nonpaging_sync_page; context->invlpg =3D NULL; - context->shadow_root_level =3D kvm_mmu_get_tdp_level(vcpu); context->direct_map =3D true; context->get_guest_pgd =3D kvm_get_guest_cr3; 
context->get_pdptr =3D kvm_pdptr_read; @@ -4786,8 +4785,6 @@ static void shadow_mmu_init_context(struct kvm_vcpu *= vcpu, struct kvm_mmu *conte context->root_level =3D cpu_mode.base.level; =20 reset_guest_paging_metadata(vcpu, context); - context->shadow_root_level =3D root_role.level; - reset_shadow_zero_bits_mask(vcpu, context); } =20 @@ -4873,8 +4870,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, context->cpu_mode.as_u64 =3D new_mode.as_u64; context->root_role.word =3D new_mode.base.word; =20 - context->shadow_root_level =3D level; - context->page_fault =3D ept_page_fault; context->gva_to_gpa =3D ept_gva_to_gpa; context->sync_page =3D ept_sync_page; diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index c18ad86c9a82..ab8c09db4c19 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1697,7 +1697,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 a= ddr, u64 *sptes, gfn_t gfn =3D addr >> PAGE_SHIFT; int leaf =3D -1; =20 - *root_level =3D vcpu->arch.mmu->shadow_root_level; + *root_level =3D vcpu->arch.mmu->root_role.level; =20 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { leaf =3D iter.level; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7038c76fa841..d169221d9305 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3863,7 +3863,7 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, h= pa_t root_hpa, hv_track_root_tdp(vcpu, root_hpa); =20 cr3 =3D vcpu->arch.cr3; - } else if (vcpu->arch.mmu->shadow_root_level >=3D PT64_ROOT_4LEVEL) { + } else if (vcpu->arch.mmu->root_role.level >=3D PT64_ROOT_4LEVEL) { cr3 =3D __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); } else { /* PCID in the guest should be impossible with a 32-bit MMU. */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b183dfc41d74..00a79b82fa46 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2960,7 +2960,7 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vc= pu) =20 if (enable_ept) ept_sync_context(construct_eptp(vcpu, root_hpa, - mmu->shadow_root_level)); + mmu->root_role.level)); else vpid_sync_context(vmx_get_current_vpid(vcpu)); } --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4FF13C433F5 for ; Mon, 21 Feb 2022 16:24:19 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380293AbiBUQYj (ORCPT ); Mon, 21 Feb 2022 11:24:39 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53624 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380253AbiBUQXW (ORCPT ); Mon, 21 Feb 2022 11:23:22 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id E5E01275E9 for ; Mon, 21 Feb 2022 08:22:58 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460578; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=+KhaAC9O/5C59/coRNji/kl3043mlJPSeKxmER3DOQQ=; b=RH7DbymCf7vh1FvlW3pTx2yIpJ7xvn0sTS90e04/QnPt4JARw+NDTZhx0UJZiyHuFBLatC 
ncAHF+bcZ+Gl4J47Hk3es6HpSICgL1RycYJnLV4E+bBLvEe2KQvWwU5v0SGHr3bFevvmy6 is27Sw1eWJ00IQluFqPf8uFifBW53Yo= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-662-s4R2jZkeO6WtRfA3T5yB2Q-1; Mon, 21 Feb 2022 11:22:56 -0500 X-MC-Unique: s4R2jZkeO6WtRfA3T5yB2Q-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id B9DDF2F4A; Mon, 21 Feb 2022 16:22:55 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 5BF5584A0E; Mon, 21 Feb 2022 16:22:55 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 22/25] KVM: x86/mmu: replace root_level with cpu_mode.base.level Date: Mon, 21 Feb 2022 11:22:40 -0500 Message-Id: <20220221162243.683208-23-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" Remove another duplicate field of struct kvm_mmu. This time it's the root level for page table walking; the separate field is always initialized as cpu_mode.base.level, so its users can look up the CPU mode directly instead. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu/mmu.c | 18 +++++++----------- arch/x86/kvm/mmu/paging_tmpl.h | 4 ++-- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index 81897aa4e669..ec89b1a488c5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -431,7 +431,6 @@ struct kvm_mmu { struct kvm_mmu_root_info root; union kvm_mmu_paging_mode cpu_mode; union kvm_mmu_page_role root_role; - u8 root_level; bool direct_map; struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS]; =20 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c33879f23e94..0c88d4206715 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2130,7 +2130,7 @@ static void shadow_walk_init_using_root(struct kvm_sh= adow_walk_iterator *iterato iterator->level =3D vcpu->arch.mmu->root_role.level; =20 if (iterator->level >=3D PT64_ROOT_4LEVEL && - vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && + vcpu->arch.mmu->cpu_mode.base.level < PT64_ROOT_4LEVEL && !vcpu->arch.mmu->direct_map) iterator->level =3D PT32E_ROOT_LEVEL; =20 @@ -3440,7 +3440,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) * On SVM, reading PDPTRs might access guest memory, which might fault * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock. */ - if (mmu->root_level =3D=3D PT32E_ROOT_LEVEL) { + if (mmu->cpu_mode.base.level =3D=3D PT32E_ROOT_LEVEL) { for (i =3D 0; i < 4; ++i) { pdptrs[i] =3D mmu->get_pdptr(vcpu, i); if (!(pdptrs[i] & PT_PRESENT_MASK)) @@ -3464,7 +3464,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) * Do we shadow a long mode page table? 
If so we need to * write-protect the guests page table root. */ - if (mmu->root_level >=3D PT64_ROOT_4LEVEL) { + if (mmu->cpu_mode.base.level >=3D PT64_ROOT_4LEVEL) { root =3D mmu_alloc_root(vcpu, root_gfn, 0, mmu->root_role.level, false); mmu->root.hpa =3D root; @@ -3503,7 +3503,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vc= pu) for (i =3D 0; i < 4; ++i) { WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); =20 - if (mmu->root_level =3D=3D PT32E_ROOT_LEVEL) { + if (mmu->cpu_mode.base.level =3D=3D PT32E_ROOT_LEVEL) { if (!(pdptrs[i] & PT_PRESENT_MASK)) { mmu->pae_root[i] =3D INVALID_PAE_ROOT; continue; @@ -3545,7 +3545,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *v= cpu) * equivalent level in the guest's NPT to shadow. Allocate the tables * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. */ - if (mmu->direct_map || mmu->root_level >=3D PT64_ROOT_4LEVEL || + if (mmu->direct_map || mmu->cpu_mode.base.level >=3D PT64_ROOT_4LEVEL || mmu->root_role.level < PT64_ROOT_4LEVEL) return 0; =20 @@ -3642,7 +3642,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) =20 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); =20 - if (vcpu->arch.mmu->root_level >=3D PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->cpu_mode.base.level >=3D PT64_ROOT_4LEVEL) { hpa_t root =3D vcpu->arch.mmu->root.hpa; sp =3D to_shadow_page(root); =20 @@ -4348,7 +4348,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vc= pu, { __reset_rsvds_bits_mask(&context->guest_rsvd_check, vcpu->arch.reserved_gpa_bits, - context->root_level, is_efer_nx(context), + context->cpu_mode.base.level, is_efer_nx(context), guest_can_use_gbpages(vcpu), is_cr4_pse(context), guest_cpuid_is_amd_or_hygon(vcpu)); @@ -4752,7 +4752,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, u= nion kvm_mmu_paging_mode cp context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; - context->root_level =3D cpu_mode.base.level; =20 if (!is_cr0_pg(context)) context->gva_to_gpa =3D nonpaging_gva_to_gpa; @@ -4782,7 +4781,6 @@ static void shadow_mmu_init_context(struct kvm_vcpu *= vcpu, struct kvm_mmu *conte paging64_init_context(context); else paging32_init_context(context); - context->root_level =3D cpu_mode.base.level; =20 reset_guest_paging_metadata(vcpu, context); reset_shadow_zero_bits_mask(vcpu, context); @@ -4874,7 +4872,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, context->gva_to_gpa =3D ept_gva_to_gpa; context->sync_page =3D ept_sync_page; context->invlpg =3D ept_invlpg; - context->root_level =3D level; context->direct_map =3D false; update_permission_bitmask(context, true); context->pkru_mask =3D 0; @@ -4909,7 +4906,6 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu= , union kvm_mmu_paging_mode g_context->get_guest_pgd =3D kvm_get_guest_cr3; g_context->get_pdptr =3D kvm_pdptr_read; g_context->inject_page_fault =3D kvm_inject_page_fault; - g_context->root_level =3D new_mode.base.level; =20 /* * L2 page tables are never shadowed, so there is no need to sync diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 7c0fa115bd56..bdfd38b0f8e6 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -361,7 +361,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker= *walker, =20 trace_kvm_mmu_pagetable_walk(addr, access); retry_walk: - walker->level =3D mmu->root_level; + walker->level =3D mmu->cpu_mode.base.level; pte =3D kvm_mmu_get_guest_pgd(vcpu, mmu); 
have_ad =3D PT_HAVE_ACCESSED_DIRTY(mmu); =20 @@ -656,7 +656,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct k= vm_page_fault *fault, WARN_ON_ONCE(gw->gfn !=3D base_gfn); direct_access =3D gw->pte_access; =20 - top_level =3D vcpu->arch.mmu->root_level; + top_level =3D vcpu->arch.mmu->cpu_mode.base.level; if (top_level =3D=3D PT32E_ROOT_LEVEL) top_level =3D PT32_ROOT_LEVEL; /* --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 76EE9C433F5 for ; Mon, 21 Feb 2022 16:24:54 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380235AbiBUQZP (ORCPT ); Mon, 21 Feb 2022 11:25:15 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53658 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380257AbiBUQXY (ORCPT ); Mon, 21 Feb 2022 11:23:24 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id C968B27150 for ; Mon, 21 Feb 2022 08:22:59 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460579; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=KNp58cP8D6qKObICCW7/tzcwMUMOB6WPr2g4EItcwmc=; b=ZtG0UdZ6FcosktqjCwwbr6rKZJSw0vIsHyn875a0lQaszjsNVsa+B+nDOYNoh/dJBjgnte WGE7VDwFT+r+wJ6xd1rf3UwDHHh1PJlEFgSVF+ci47TFxWLU4n4P1cCPcyoZHNgC2ZYj+m 4DR2EtK+lj44k1QGBndOMj45JDQ4Ypg= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-62-9vcQgLu8OLKi3e1EfUZuhw-1; Mon, 21 Feb 2022 11:22:57 -0500 X-MC-Unique: 9vcQgLu8OLKi3e1EfUZuhw-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 3EBF281424C; Mon, 21 Feb 2022 16:22:56 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id D4AA384A0E; Mon, 21 Feb 2022 16:22:55 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 23/25] KVM: x86/mmu: replace direct_map with root_role.direct Date: Mon, 21 Feb 2022 11:22:41 -0500 Message-Id: <20220221162243.683208-24-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" direct_map is always equal to the direct field of the root page's role: - for shadow paging, direct_map is true if CR0.PG=3D0 and root_role.direct = is copied from cpu_mode.base.direct - for TDP, it is always true and root_role.direct is also always true - for shadow EPT, it is always false and 
root_role.direct is also always false Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu/mmu.c | 27 ++++++++++++--------------- arch/x86/kvm/x86.c | 12 ++++++------ 3 files changed, 18 insertions(+), 22 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index ec89b1a488c5..af90d0653139 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -431,7 +431,6 @@ struct kvm_mmu { struct kvm_mmu_root_info root; union kvm_mmu_paging_mode cpu_mode; union kvm_mmu_page_role root_role; - bool direct_map; struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS]; =20 /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0c88d4206715..8eb2c0373309 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2029,7 +2029,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct k= vm_vcpu *vcpu, int direct, unsigned int access) { - bool direct_mmu =3D vcpu->arch.mmu->direct_map; + bool direct_mmu =3D vcpu->arch.mmu->root_role.direct; union kvm_mmu_page_role role; struct hlist_head *sp_list; unsigned quadrant; @@ -2131,7 +2131,7 @@ static void shadow_walk_init_using_root(struct kvm_sh= adow_walk_iterator *iterato =20 if (iterator->level >=3D PT64_ROOT_4LEVEL && vcpu->arch.mmu->cpu_mode.base.level < PT64_ROOT_4LEVEL && - !vcpu->arch.mmu->direct_map) + !vcpu->arch.mmu->root_role.direct) iterator->level =3D PT32E_ROOT_LEVEL; =20 if (iterator->level =3D=3D PT32E_ROOT_LEVEL) { @@ -2507,7 +2507,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcp= u *vcpu, gva_t gva) gpa_t gpa; int r; =20 - if (vcpu->arch.mmu->direct_map) + if (vcpu->arch.mmu->root_role.direct) return 0; =20 gpa =3D kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); @@ -3545,7 +3545,8 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *v= cpu) * equivalent level in the guest's NPT to shadow. Allocate the tables * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. 
*/ - if (mmu->direct_map || mmu->cpu_mode.base.level >=3D PT64_ROOT_4LEVEL || + if (mmu->root_role.direct || + mmu->cpu_mode.base.level >=3D PT64_ROOT_4LEVEL || mmu->root_role.level < PT64_ROOT_4LEVEL) return 0; =20 @@ -3634,7 +3635,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) int i; struct kvm_mmu_page *sp; =20 - if (vcpu->arch.mmu->direct_map) + if (vcpu->arch.mmu->root_role.direct) return; =20 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa)) @@ -3854,7 +3855,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *= vcpu, gpa_t cr2_or_gpa, =20 arch.token =3D (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch.gfn =3D gfn; - arch.direct_map =3D mmu->direct_map; + arch.direct_map =3D mmu->root_role.direct; arch.cr3 =3D kvm_mmu_get_guest_pgd(vcpu, mmu); =20 return kvm_setup_async_pf(vcpu, cr2_or_gpa, @@ -4072,7 +4073,6 @@ static void nonpaging_init_context(struct kvm_mmu *co= ntext) context->gva_to_gpa =3D nonpaging_gva_to_gpa; context->sync_page =3D nonpaging_sync_page; context->invlpg =3D NULL; - context->direct_map =3D true; } =20 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pg= d, @@ -4654,7 +4654,6 @@ static void paging64_init_context(struct kvm_mmu *con= text) context->gva_to_gpa =3D paging64_gva_to_gpa; context->sync_page =3D paging64_sync_page; context->invlpg =3D paging64_invlpg; - context->direct_map =3D false; } =20 static void paging32_init_context(struct kvm_mmu *context) @@ -4663,7 +4662,6 @@ static void paging32_init_context(struct kvm_mmu *con= text) context->gva_to_gpa =3D paging32_gva_to_gpa; context->sync_page =3D paging32_sync_page; context->invlpg =3D paging32_invlpg; - context->direct_map =3D false; } =20 static union kvm_mmu_paging_mode @@ -4748,7 +4746,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, u= nion kvm_mmu_paging_mode cp context->page_fault =3D kvm_tdp_page_fault; context->sync_page =3D nonpaging_sync_page; context->invlpg =3D NULL; - context->direct_map =3D true; context->get_guest_pgd =3D kvm_get_guest_cr3; context->get_pdptr =3D kvm_pdptr_read; context->inject_page_fault =3D kvm_inject_page_fault; @@ -4872,7 +4869,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, b= ool execonly, context->gva_to_gpa =3D ept_gva_to_gpa; context->sync_page =3D ept_sync_page; context->invlpg =3D ept_invlpg; - context->direct_map =3D false; + update_permission_bitmask(context, true); context->pkru_mask =3D 0; reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level); @@ -4987,13 +4984,13 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) { int r; =20 - r =3D mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map); + r =3D mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct); if (r) goto out; r =3D mmu_alloc_special_roots(vcpu); if (r) goto out; - if (vcpu->arch.mmu->direct_map) + if (vcpu->arch.mmu->root_role.direct) r =3D mmu_alloc_direct_roots(vcpu); else r =3D mmu_alloc_shadow_roots(vcpu); @@ -5197,7 +5194,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t c= r2_or_gpa, u64 error_code, void *insn, int insn_len) { int r, emulation_type =3D EMULTYPE_PF; - bool direct =3D vcpu->arch.mmu->direct_map; + bool direct =3D vcpu->arch.mmu->root_role.direct; =20 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) return RET_PF_RETRY; @@ -5228,7 +5225,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t c= r2_or_gpa, u64 error_code, * paging in both guests. If true, we simply unprotect the page * and resume the guest. 
*/ - if (vcpu->arch.mmu->direct_map && + if (vcpu->arch.mmu->root_role.direct && (error_code & PFERR_NESTED_GUEST_PAGE) =3D=3D PFERR_NESTED_GUEST_PAGE= ) { kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); return 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1546a25a9307..53730e81ceb5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8016,7 +8016,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vc= pu, gpa_t cr2_or_gpa, WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) return false; =20 - if (!vcpu->arch.mmu->direct_map) { + if (!vcpu->arch.mmu->root_role.direct) { /* * Write permission should be allowed since only * write access need to be emulated. @@ -8049,7 +8049,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vc= pu, gpa_t cr2_or_gpa, kvm_release_pfn_clean(pfn); =20 /* The instructions are well-emulated on direct mmu. */ - if (vcpu->arch.mmu->direct_map) { + if (vcpu->arch.mmu->root_role.direct) { unsigned int indirect_shadow_pages; =20 write_lock(&vcpu->kvm->mmu_lock); @@ -8117,7 +8117,7 @@ static bool retry_instruction(struct x86_emulate_ctxt= *ctxt, vcpu->arch.last_retry_eip =3D ctxt->eip; vcpu->arch.last_retry_addr =3D cr2_or_gpa; =20 - if (!vcpu->arch.mmu->direct_map) + if (!vcpu->arch.mmu->root_role.direct) gpa =3D kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); =20 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); @@ -8397,7 +8397,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gp= a_t cr2_or_gpa, ctxt->exception.address =3D cr2_or_gpa; =20 /* With shadow page tables, cr2 contains a GVA or nGPA. */ - if (vcpu->arch.mmu->direct_map) { + if (vcpu->arch.mmu->root_role.direct) { ctxt->gpa_available =3D true; ctxt->gpa_val =3D cr2_or_gpa; } @@ -12198,7 +12198,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcp= u, struct kvm_async_pf *work) { int r; =20 - if ((vcpu->arch.mmu->direct_map !=3D work->arch.direct_map) || + if ((vcpu->arch.mmu->root_role.direct !=3D work->arch.direct_map) || work->wakeup_all) return; =20 @@ -12206,7 +12206,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcp= u, struct kvm_async_pf *work) if (unlikely(r)) return; =20 - if (!vcpu->arch.mmu->direct_map && + if (!vcpu->arch.mmu->root_role.direct && work->arch.cr3 !=3D kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu)) return; =20 --=20 2.31.1 From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5A60FC433EF for ; Mon, 21 Feb 2022 16:24:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380440AbiBUQZB (ORCPT ); Mon, 21 Feb 2022 11:25:01 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53662 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380262AbiBUQXY (ORCPT ); Mon, 21 Feb 2022 11:23:24 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 7877F27161 for ; Mon, 21 Feb 2022 08:23:00 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460579; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; 
bh=dPlOBPn8ErB7X42yhcpXa0pXThH6x0ky6io51/DXKJ4=; b=B73mfTOl7CaFTnRq48R4skMg0VWx9Ax7aKOySS63TWQYWYaYwjURJgr/Hk3K4pF40D6Zdu V+XTUMGsZSruY7wxkj/o8nmaco+QK17phu4sNx1ueFzpG1I3RLf+hs28CyTfwUxtXzeHzW Rj1mjsFEtbfsq3b8W96wENvOCRTQOsI= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-74-xOUV900ENwCCOIjIo3B4nA-1; Mon, 21 Feb 2022 11:22:58 -0500 X-MC-Unique: xOUV900ENwCCOIjIo3B4nA-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id B780C801B04; Mon, 21 Feb 2022 16:22:56 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id 597D084A14; Mon, 21 Feb 2022 16:22:56 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 24/25] KVM: x86/mmu: initialize constant-value fields just once Date: Mon, 21 Feb 2022 11:22:42 -0500 Message-Id: <20220221162243.683208-25-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" The get_guest_pgd, get_pdptr and inject_page_fault pointers are constant for all three of root_mmu, guest_mmu and nested_mmu. In fact, the guest_mmu function pointers depend on the processor vendor and need to be retrieved from three new nested_ops, but the others are absolutely the same. Opportunistically stop initializing get_pdptr for nested EPT, since it does not have PDPTRs. 
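To make the shape of the change easier to see before reading the diff below, here is a minimal, self-contained sketch of the pattern: constant callbacks are wired up once at creation time, while only the variable state is recomputed on every reconfiguration. The names (ctx, ctx_create, ctx_reconfigure) are hypothetical stand-ins, not KVM's real structures:

#include <stdio.h>

struct ctx {
	/* constant for the lifetime of the context, set once at creation */
	unsigned long (*get_pgd)(struct ctx *c);
	/* recomputed on every reconfiguration */
	int level;
	unsigned long pgd;
};

static unsigned long get_pgd_direct(struct ctx *c)
{
	return c->pgd;
}

/* runs once at creation: the constant callback is set here and never again */
static void ctx_create(struct ctx *c)
{
	c->get_pgd = get_pgd_direct;
	c->pgd = 0x1000;
}

/* runs on every mode change: only the variable state is recomputed */
static void ctx_reconfigure(struct ctx *c, int level)
{
	c->level = level;
}

int main(void)
{
	struct ctx c;

	ctx_create(&c);
	ctx_reconfigure(&c, 4);
	printf("level=%d pgd=%#lx\n", c.level, c.get_pgd(&c));
	return 0;
}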
Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 5 +++ arch/x86/kvm/mmu/mmu.c | 65 +++++++++++++++++---------------- arch/x86/kvm/svm/nested.c | 9 +++-- arch/x86/kvm/vmx/nested.c | 5 +-- 4 files changed, 46 insertions(+), 38 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index af90d0653139..b70965235c31 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1503,6 +1503,11 @@ struct kvm_x86_nested_ops { uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); + void (*inject_nested_tdp_vmexit)(struct kvm_vcpu *vcpu, + struct x86_exception *fault); + + unsigned long (*get_nested_pgd)(struct kvm_vcpu *vcpu); + u64 (*get_nested_pdptr)(struct kvm_vcpu *vcpu, int index); }; =20 struct kvm_x86_init_ops { diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 8eb2c0373309..27cb6ba5a3b0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4743,12 +4743,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, = union kvm_mmu_paging_mode cp =20 context->cpu_mode.as_u64 =3D cpu_mode.as_u64; context->root_role.word =3D root_role.word; - context->page_fault =3D kvm_tdp_page_fault; - context->sync_page =3D nonpaging_sync_page; - context->invlpg =3D NULL; - context->get_guest_pgd =3D kvm_get_guest_cr3; - context->get_pdptr =3D kvm_pdptr_read; - context->inject_page_fault =3D kvm_inject_page_fault; =20 if (!is_cr0_pg(context)) context->gva_to_gpa =3D nonpaging_gva_to_gpa; @@ -4758,7 +4752,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, u= nion kvm_mmu_paging_mode cp context->gva_to_gpa =3D paging32_gva_to_gpa; =20 reset_guest_paging_metadata(vcpu, context); - reset_tdp_shadow_zero_bits_mask(context); } =20 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu = *context, @@ -4783,8 +4776,8 @@ static void shadow_mmu_init_context(struct kvm_vcpu *= vcpu, struct kvm_mmu *conte reset_shadow_zero_bits_mask(vcpu, context); } =20 -static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, - union kvm_mmu_paging_mode cpu_mode) +static void init_kvm_softmmu(struct kvm_vcpu *vcpu, + union kvm_mmu_paging_mode cpu_mode) { struct kvm_mmu *context =3D &vcpu->arch.root_mmu; union kvm_mmu_page_role root_role; @@ -4880,18 +4873,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, = bool execonly, } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); =20 -static void init_kvm_softmmu(struct kvm_vcpu *vcpu, - union kvm_mmu_paging_mode cpu_mode) -{ - struct kvm_mmu *context =3D &vcpu->arch.root_mmu; - - kvm_init_shadow_mmu(vcpu, cpu_mode); - - context->get_guest_pgd =3D kvm_get_guest_cr3; - context->get_pdptr =3D kvm_pdptr_read; - context->inject_page_fault =3D kvm_inject_page_fault_shadow; -} - static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_pagin= g_mode new_mode) { struct kvm_mmu *g_context =3D &vcpu->arch.nested_mmu; @@ -4899,16 +4880,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcp= u, union kvm_mmu_paging_mode if (new_mode.as_u64 =3D=3D g_context->cpu_mode.as_u64) return; =20 - g_context->cpu_mode.as_u64 =3D new_mode.as_u64; - g_context->get_guest_pgd =3D kvm_get_guest_cr3; - g_context->get_pdptr =3D kvm_pdptr_read; - g_context->inject_page_fault =3D kvm_inject_page_fault; - - /* - * L2 page tables are never shadowed, so there is no need to sync - * SPTEs. 
- */ - g_context->invlpg =3D NULL; + g_context->cpu_mode.as_u64 =3D new_mode.as_u64; =20 /* * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using @@ -5477,6 +5449,37 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) =20 vcpu->arch.mmu_shadow_page_cache.gfp_zero =3D __GFP_ZERO; =20 + vcpu->arch.root_mmu.get_guest_pgd =3D kvm_get_guest_cr3; + vcpu->arch.root_mmu.get_pdptr =3D kvm_pdptr_read; + + if (tdp_enabled) { + vcpu->arch.root_mmu.inject_page_fault =3D kvm_inject_page_fault; + vcpu->arch.root_mmu.page_fault =3D kvm_tdp_page_fault; + vcpu->arch.root_mmu.sync_page =3D nonpaging_sync_page; + vcpu->arch.root_mmu.invlpg =3D NULL; + reset_tdp_shadow_zero_bits_mask(&vcpu->arch.root_mmu); + + vcpu->arch.guest_mmu.get_guest_pgd =3D kvm_x86_ops.nested_ops->get_neste= d_pgd; + vcpu->arch.guest_mmu.get_pdptr =3D kvm_x86_ops.nested_ops->get_nested_pd= ptr; + vcpu->arch.guest_mmu.inject_page_fault =3D kvm_x86_ops.nested_ops->injec= t_nested_tdp_vmexit; + } else { + vcpu->arch.root_mmu.inject_page_fault =3D kvm_inject_page_fault_shadow; + /* + * page_fault, sync_page, invlpg are set at runtime depending + * on the guest paging mode. + */ + } + + vcpu->arch.nested_mmu.get_guest_pgd =3D kvm_get_guest_cr3; + vcpu->arch.nested_mmu.get_pdptr =3D kvm_pdptr_read; + vcpu->arch.nested_mmu.inject_page_fault =3D kvm_inject_page_fault; + + /* + * L2 page tables are never shadowed, so there is no need to sync + * SPTEs. + */ + vcpu->arch.nested_mmu.invlpg =3D NULL; + vcpu->arch.mmu =3D &vcpu->arch.root_mmu; vcpu->arch.walk_mmu =3D &vcpu->arch.root_mmu; =20 diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ff58c9ebc552..713c7531de99 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -109,10 +109,8 @@ static void nested_svm_init_mmu_context(struct kvm_vcp= u *vcpu) kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4, svm->vmcb01.ptr->save.efer, svm->nested.ctl.nested_cr3); - vcpu->arch.mmu->get_guest_pgd =3D nested_svm_get_tdp_cr3; - vcpu->arch.mmu->get_pdptr =3D nested_svm_get_tdp_pdptr; - vcpu->arch.mmu->inject_page_fault =3D nested_svm_inject_npf_exit; - vcpu->arch.walk_mmu =3D &vcpu->arch.nested_mmu; + + vcpu->arch.walk_mmu =3D &vcpu->arch.nested_mmu; } =20 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) @@ -1569,4 +1567,7 @@ struct kvm_x86_nested_ops svm_nested_ops =3D { .get_state =3D svm_get_nested_state, .set_state =3D svm_set_nested_state, .inject_page_fault =3D svm_inject_page_fault_nested, + .inject_nested_tdp_vmexit =3D nested_svm_inject_npf_exit, + .get_nested_pgd =3D nested_svm_get_tdp_cr3, + .get_nested_pdptr =3D nested_svm_get_tdp_pdptr, }; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 564c60566da7..02df0f4fccef 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -414,9 +414,6 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu= *vcpu) =20 vcpu->arch.mmu =3D &vcpu->arch.guest_mmu; nested_ept_new_eptp(vcpu); - vcpu->arch.mmu->get_guest_pgd =3D nested_ept_get_eptp; - vcpu->arch.mmu->inject_page_fault =3D nested_ept_inject_page_fault; - vcpu->arch.mmu->get_pdptr =3D kvm_pdptr_read; =20 vcpu->arch.walk_mmu =3D &vcpu->arch.nested_mmu; } @@ -6805,4 +6802,6 @@ struct kvm_x86_nested_ops vmx_nested_ops =3D { .enable_evmcs =3D nested_enable_evmcs, .get_evmcs_version =3D nested_get_evmcs_version, .inject_page_fault =3D vmx_inject_page_fault_nested, + .inject_nested_tdp_vmexit =3D nested_ept_inject_page_fault, + .get_nested_pgd =3D nested_ept_get_eptp, }; --=20 2.31.1 
From nobody Sun May 5 06:31:06 2024 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A8C1AC4332F for ; Mon, 21 Feb 2022 16:24:45 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1380499AbiBUQZF (ORCPT ); Mon, 21 Feb 2022 11:25:05 -0500 Received: from mxb-00190b01.gslb.pphosted.com ([23.128.96.19]:53624 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1380272AbiBUQX0 (ORCPT ); Mon, 21 Feb 2022 11:23:26 -0500 Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 832D227B05 for ; Mon, 21 Feb 2022 08:23:02 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1645460581; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=FeFhJdHXOlsdXfhmOYeTe++C7zpCQT2snbYmjw3XDxQ=; b=S/khDaC6pgAXYfQinI6BkilW31mhDOUT6fTKo1Dgzgkm4/OVTxQl4WXVWKBxJGIHJ+6DKA CXERXmjH4BON5lR6y8gOB5OGXgBHc3hZrWM9vjJkepJGNFRu4R9LqjQ3QKMiugq0nI1llj wD7LyJN8TJVJ3cK2iPfcsx0Lis3hsHc= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id us-mta-654-om55HxEBP1aVmQnKxcqC9Q-1; Mon, 21 Feb 2022 11:22:58 -0500 X-MC-Unique: om55HxEBP1aVmQnKxcqC9Q-1 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 5140B1091DA1; Mon, 21 Feb 2022 16:22:57 +0000 (UTC) Received: from virtlab701.virt.lab.eng.bos.redhat.com (virtlab701.virt.lab.eng.bos.redhat.com [10.19.152.228]) by smtp.corp.redhat.com (Postfix) with ESMTP id E69E384A0E; Mon, 21 Feb 2022 16:22:56 +0000 (UTC) From: Paolo Bonzini To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: dmatlack@google.com, seanjc@google.com Subject: [PATCH v2 25/25] KVM: x86/mmu: extract initialization of the page walking data Date: Mon, 21 Feb 2022 11:22:43 -0500 Message-Id: <20220221162243.683208-26-pbonzini@redhat.com> In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com> References: <20220221162243.683208-1-pbonzini@redhat.com> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" struct kvm_mmu consists logically of two parts: a page walker that operates based on the CPU mode, and the shadow page table builder that operates based on the MMU role; the latter does not exist on vcpu->arch.nested_mmu. The callbacks are also logically separated; of those that are not constant, gva_to_gpa belongs to the page walker and everything else belongs to the shadow page table builder. This is visible in the duplicated code to initialize gva_to_gpa in *_init_context (for shadow paging and nested NPT), in init_kvm_tdp_mmu (for non-nested TDP), and in init_kvm_nested_mmu. The guest paging metadata also belongs to the page walker and is duplicated in the same way. 
Extract this duplicated code to a new function.  The new function is
basically the same as init_kvm_nested_mmu, since the nested MMU has
only the page walker part.  The only difference is that it uses the
CPU mode rather than the VCPU directly, which is more in line with
the rest of the MMU code.

Shadow EPT does not use the new function, since it has its own
gva_to_gpa callback and a different set of reserved bits.

Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c | 87 ++++++++++++++----------------------------
 1 file changed, 28 insertions(+), 59 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 27cb6ba5a3b0..659f014190d2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4070,7 +4070,6 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
-	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
 }
@@ -4651,7 +4650,6 @@ static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
 static void paging64_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging64_page_fault;
-	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
 }
@@ -4659,7 +4657,6 @@ static void paging64_init_context(struct kvm_mmu *context)
 static void paging32_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging32_page_fault;
-	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
 }
@@ -4700,6 +4697,24 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 	return role;
 }
 
+static void kvm_vcpu_init_walker(struct kvm_vcpu *vcpu,
+				 struct kvm_mmu *mmu,
+				 union kvm_mmu_paging_mode new_mode)
+{
+	if (new_mode.as_u64 == mmu->cpu_mode.as_u64)
+		return;
+
+	mmu->cpu_mode.as_u64 = new_mode.as_u64;
+	if (!is_cr0_pg(mmu))
+		mmu->gva_to_gpa = nonpaging_gva_to_gpa;
+	else if (is_cr4_pae(mmu))
+		mmu->gva_to_gpa = paging64_gva_to_gpa;
+	else
+		mmu->gva_to_gpa = paging32_gva_to_gpa;
+
+	reset_guest_paging_metadata(vcpu, mmu);
+}
+
 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 {
 	/* tdp_root_level is architecture forced level, use it if nonzero */
@@ -4735,36 +4750,17 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_paging_mode cpu_mode)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_mode);
 
-	if (cpu_mode.as_u64 == context->cpu_mode.as_u64 &&
-	    root_role.word == context->root_role.word)
-		return;
-
-	context->cpu_mode.as_u64 = cpu_mode.as_u64;
-	context->root_role.word = root_role.word;
-
-	if (!is_cr0_pg(context))
-		context->gva_to_gpa = nonpaging_gva_to_gpa;
-	else if (is_cr4_pae(context))
-		context->gva_to_gpa = paging64_gva_to_gpa;
-	else
-		context->gva_to_gpa = paging32_gva_to_gpa;
-
-	reset_guest_paging_metadata(vcpu, context);
+	context->root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_mode);
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    union kvm_mmu_paging_mode cpu_mode,
 				    union kvm_mmu_page_role root_role)
 {
-	if (cpu_mode.as_u64 == context->cpu_mode.as_u64 &&
-	    root_role.word == context->root_role.word)
+	if (root_role.word == context->root_role.word)
 		return;
 
-	context->cpu_mode.as_u64 = cpu_mode.as_u64;
 	context->root_role.word = root_role.word;
-
 	if (!is_cr0_pg(context))
 		nonpaging_init_context(context);
 	else if (is_cr4_pae(context))
@@ -4772,7 +4768,6 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 	else
 		paging32_init_context(context);
 
-	reset_guest_paging_metadata(vcpu, context);
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 
@@ -4795,8 +4790,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
 	 * MMU contexts.
	 */
 	root_role.efer_nx = true;
-
-	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
+	shadow_mmu_init_context(vcpu, context, root_role);
 }
 
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
@@ -4811,10 +4805,12 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, &regs);
 	union kvm_mmu_page_role root_role;
 
+	kvm_vcpu_init_walker(vcpu, context, cpu_mode);
+
 	root_role = cpu_mode.base;
 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
 
-	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
+	shadow_mmu_init_context(vcpu, context, root_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4873,43 +4869,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_paging_mode new_mode)
-{
-	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
-
-	if (new_mode.as_u64 == g_context->cpu_mode.as_u64)
-		return;
-
-	g_context->cpu_mode.as_u64 = new_mode.as_u64;
-
-	/*
-	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
-	 * L1's nested page tables (e.g. EPT12). The nested translation
-	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
-	 * L2's page tables as the first level of translation and L1's
-	 * nested page tables as the second level of translation. Basically
-	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
-	 */
-	if (!is_paging(vcpu))
-		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
-	else if (is_long_mode(vcpu))
-		g_context->gva_to_gpa = paging64_gva_to_gpa;
-	else if (is_pae(vcpu))
-		g_context->gva_to_gpa = paging64_gva_to_gpa;
-	else
-		g_context->gva_to_gpa = paging32_gva_to_gpa;
-
-	reset_guest_paging_metadata(vcpu, g_context);
-}
-
 void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, &regs);
 
+	kvm_vcpu_init_walker(vcpu, vcpu->arch.walk_mmu, cpu_mode);
 	if (mmu_is_nested(vcpu))
-		init_kvm_nested_mmu(vcpu, cpu_mode);
-	else if (tdp_enabled)
+		return;
+
+	if (tdp_enabled)
 		init_kvm_tdp_mmu(vcpu, cpu_mode);
 	else
 		init_kvm_softmmu(vcpu, cpu_mode);
-- 
2.31.1
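After this patch, kvm_init_mmu() always initializes the page-walker half
first, for whatever walk_mmu points to: kvm_vcpu_init_walker() picks the
gva_to_gpa callback from the CPU mode and refreshes the guest paging
metadata, and only non-nested configurations go on to set up the shadow
page table builder.  A minimal standalone C sketch of the walker half
(toy types and names, not the KVM identifiers) might look like this:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Toy model of the split described in the commit message: the
	 * "walker" is keyed on the guest's CPU mode; the "builder" (not
	 * shown) would be keyed on the shadow-page-table role.
	 */
	struct cpu_mode { bool paging; bool pae; };

	struct mmu {
		struct cpu_mode cpu_mode;
		uint64_t (*gva_to_gpa)(uint64_t gva);
	};

	/* Toy translations, for illustration only. */
	static uint64_t nonpaging_xlate(uint64_t gva) { return gva; }
	static uint64_t paging64_xlate(uint64_t gva)  { return gva + 0x100000; }
	static uint64_t paging32_xlate(uint64_t gva)  { return gva & 0xffffffffu; }

	/*
	 * One helper chooses the page-walker callback for every MMU
	 * flavour, mirroring how the patch funnels root_mmu, guest_mmu
	 * and nested_mmu through a single walker-initialization function.
	 */
	static void init_walker(struct mmu *mmu, struct cpu_mode mode)
	{
		mmu->cpu_mode = mode;
		if (!mode.paging)
			mmu->gva_to_gpa = nonpaging_xlate;
		else if (mode.pae)
			mmu->gva_to_gpa = paging64_xlate;
		else
			mmu->gva_to_gpa = paging32_xlate;
	}

	int main(void)
	{
		struct mmu walk_mmu;
		struct cpu_mode mode = { .paging = true, .pae = true };

		init_walker(&walk_mmu, mode);
		printf("gpa = 0x%llx\n",
		       (unsigned long long)walk_mmu.gva_to_gpa(0x4000));
		return 0;
	}

Because every MMU goes through the same walker setup, the nested case can
simply return early once the walker is initialized, which is exactly what
the new kvm_init_mmu() above does.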