From: Anish Ghulati
Date: Tue, 7 Nov 2023 20:19:49 +0000
Subject: [RFC PATCH 01/14] KVM: x86: Move common module params from SVM/VMX to x86
Message-ID: <20231107202002.667900-2-aghulati@google.com>
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland
Cc: Anish Ghulati

Move the nested and enable_vnmi module parameters from SVM and VMX into
common x86 code.

Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/svm/nested.c |  4 ++--
 arch/x86/kvm/svm/svm.c    | 17 +++++------------
 arch/x86/kvm/svm/svm.h    |  3 +--
 arch/x86/kvm/vmx/vmx.c    | 11 -----------
 arch/x86/kvm/x86.c        | 11 +++++++++++
 arch/x86/kvm/x86.h        |  4 ++++
 6 files changed, 23 insertions(+), 27 deletions(-)
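
As a rough sketch of the pattern being applied (illustrative only; the
file name, init/exit functions, and the static qualifier are hypothetical
and not part of the diff below): the knob is defined once in common code,
published via module_param(), and the rest of the code consumes it through
a shared extern declaration instead of a per-vendor copy. In the real
patch the definition in x86.c additionally carries EXPORT_SYMBOL_GPL()
because the vendor code is still modular at this point in the series.

  /* nested_param_sketch.c - minimal, hypothetical kernel-module sketch */
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <linux/printk.h>

  /* One definition; readable at /sys/module/<module>/parameters/nested. */
  static bool nested = true;
  module_param(nested, bool, 0444);	/* 0444 is the octal form of S_IRUGO */

  static int __init sketch_init(void)
  {
  	/* Vendor code tests the shared knob instead of a private copy. */
  	pr_info("nested virtualization %s\n", nested ? "enabled" : "disabled");
  	return 0;
  }

  static void __exit sketch_exit(void)
  {
  }

  module_init(sketch_init);
  module_exit(sketch_exit);
  MODULE_LICENSE("GPL");
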
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index dd496c9e5f91..aebccf7c1c2d 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -666,7 +666,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 	else
 		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
 
-	if (vnmi) {
+	if (enable_vnmi) {
 		if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
 			svm->vcpu.arch.nmi_pending++;
 			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
@@ -1083,7 +1083,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 		svm_update_lbrv(vcpu);
 	}
 
-	if (vnmi) {
+	if (enable_vnmi) {
 		if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
 			vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
 		else
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f283eb47f6ac..3d44e42f4f22 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -197,10 +197,6 @@ module_param(pause_filter_count_max, ushort, 0444);
 bool npt_enabled = true;
 module_param_named(npt, npt_enabled, bool, 0444);
 
-/* allow nested virtualization in KVM/SVM */
-static int nested = true;
-module_param(nested, int, S_IRUGO);
-
 /* enable/disable Next RIP Save */
 int nrips = true;
 module_param(nrips, int, 0444);
@@ -234,9 +230,6 @@ module_param(dump_invalid_vmcb, bool, 0644);
 bool intercept_smi = true;
 module_param(intercept_smi, bool, 0444);
 
-bool vnmi = true;
-module_param(vnmi, bool, 0444);
-
 static bool svm_gp_erratum_intercept = true;
 
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1357,7 +1350,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_apicv_active(vcpu))
 		avic_init_vmcb(svm, vmcb);
 
-	if (vnmi)
+	if (enable_vnmi)
 		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
 
 	if (vgif) {
@@ -5089,7 +5082,7 @@ static __init void svm_set_cpu_caps(void)
 	if (vgif)
 		kvm_cpu_cap_set(X86_FEATURE_VGIF);
 
-	if (vnmi)
+	if (enable_vnmi)
 		kvm_cpu_cap_set(X86_FEATURE_VNMI);
 
 	/* Nested VM can receive #VMEXIT instead of triggering #GP */
@@ -5253,11 +5246,11 @@ static __init int svm_hardware_setup(void)
 		pr_info("Virtual GIF supported\n");
 	}
 
-	vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
-	if (vnmi)
+	enable_vnmi = vgif && enable_vnmi && boot_cpu_has(X86_FEATURE_VNMI);
+	if (enable_vnmi)
 		pr_info("Virtual NMI enabled\n");
 
-	if (!vnmi) {
+	if (!enable_vnmi) {
 		svm_x86_ops.is_vnmi_pending = NULL;
 		svm_x86_ops.set_vnmi_pending = NULL;
 	}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f41253958357..436632706848 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -38,7 +38,6 @@ extern int nrips;
 extern int vgif;
 extern bool intercept_smi;
 extern bool x2avic_enabled;
-extern bool vnmi;
 
 /*
  * Clean bits in VMCB.
@@ -510,7 +509,7 @@ static inline bool is_x2apic_msrpm_offset(u32 offset)
 
 static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
 {
-	if (!vnmi)
+	if (!enable_vnmi)
 		return NULL;
 
 	if (is_guest_mode(&svm->vcpu))
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 43b87ad5fde8..65d59de3cc63 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -80,9 +80,6 @@ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
 bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
-static bool __read_mostly enable_vnmi = 1;
-module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
-
 bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
 
@@ -107,14 +104,6 @@ module_param(enable_apicv, bool, S_IRUGO);
 bool __read_mostly enable_ipiv = true;
 module_param(enable_ipiv, bool, 0444);
 
-/*
- * If nested=1, nested virtualization is supported, i.e., guests may use
- * VMX and be a hypervisor for its own guests. If nested=0, guests may not
- * use VMX instructions.
- */
-static bool __read_mostly nested = 1;
-module_param(nested, bool, S_IRUGO);
-
 bool __read_mostly enable_pml = 1;
 module_param_named(pml, enable_pml, bool, S_IRUGO);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aab095f89d9e..6b7f89fd2d47 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -176,6 +176,17 @@ bool __read_mostly enable_vmware_backdoor = false;
 module_param(enable_vmware_backdoor, bool, S_IRUGO);
 EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
 
+/*
+ * If nested=1, nested virtualization is supported
+ */
+bool __read_mostly nested = 1;
+module_param(nested, bool, S_IRUGO);
+EXPORT_SYMBOL_GPL(nested);
+
+bool __read_mostly enable_vnmi = 1;
+module_param(enable_vnmi, bool, S_IRUGO);
+EXPORT_SYMBOL_GPL(enable_vnmi);
+
 /*
  * Flags to manipulate forced emulation behavior (any non-zero value will
  * enable forced emulation).
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1e7be1f6ab29..6b5490319d1b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -367,6 +367,10 @@ extern unsigned int min_timer_period_us;
 
 extern bool enable_vmware_backdoor;
 
+extern bool nested;
+
+extern bool enable_vnmi;
+
 extern int pi_inject_timer;
 
 extern bool report_ignored_msrs;
-- 
2.42.0.869.gea05f2083d-goog
From: Anish Ghulati
Date: Tue, 7 Nov 2023 20:19:50 +0000
Subject: [RFC PATCH 02/14] KVM: x86: Fold x86 vendor modules into the main KVM module
Message-ID: <20231107202002.667900-3-aghulati@google.com>
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland
Cc: Anish Ghulati

Collapse the x86 vendor modules, i.e. kvm_intel and kvm_amd, into kvm.ko.
Add a new vendor_exit function to kvm_x86_ops so that KVM knows which
vendor exit function to call when the unified module is unloaded. Since
the vendor modules no longer exist, the vendor_exit call does not have to
go through a static call. Expose the vendor init/exit/supported functions
so that they can be called from vendor-neutral KVM code.

Signed-off-by: Anish Ghulati
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  2 +
 arch/x86/kernel/nmi.c              |  2 +-
 arch/x86/kvm/Kconfig               | 12 ++----
 arch/x86/kvm/Makefile              | 10 ++---
 arch/x86/kvm/svm/svm.c             | 38 +++++++----------
 arch/x86/kvm/vmx/vmx.c             | 66 +++++++++++++-----------------
 arch/x86/kvm/x86.c                 | 18 +++++---
 arch/x86/kvm/x86.h                 | 15 +++++++
 9 files changed, 82 insertions(+), 82 deletions(-)
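
As a rough illustration of the control flow this creates (a user-space
sketch with made-up names, not KVM's actual code): unified init probes
each vendor in turn, records a plain function pointer for teardown in the
shared ops structure, and unified exit invokes it directly. No static_call
machinery is needed once everything is linked into one module.

  #include <stdio.h>
  #include <stdbool.h>

  struct x86_ops {
  	const char *name;
  	void (*vendor_exit)(void);
  };

  static struct x86_ops ops;

  static void svm_exit(void) { puts("svm: emergency callback unregistered"); }
  static void vmx_exit(void) { puts("vmx: l1d flush state cleaned up"); }

  /* Stand-ins for the real CPUID/MSR feature probes. */
  static bool svm_supported(void) { return true; }
  static bool vmx_supported(void) { return false; }

  static int unified_init(void)
  {
  	if (svm_supported()) {
  		ops = (struct x86_ops){ .name = "svm", .vendor_exit = svm_exit };
  		return 0;
  	}
  	if (vmx_supported()) {
  		ops = (struct x86_ops){ .name = "vmx", .vendor_exit = vmx_exit };
  		return 0;
  	}
  	fprintf(stderr, "no hardware support for SVM or VMX\n");
  	return -1;
  }

  static void unified_exit(void)
  {
  	ops.vendor_exit();	/* direct call through the ops struct */
  }

  int main(void)
  {
  	if (unified_init() == 0)
  		unified_exit();
  	return 0;
  }
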
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index e3054e3e46d5..764be4a26a0c 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -14,6 +14,7 @@ BUILD_BUG_ON(1)
  * to make a definition optional, but in this case the default will
  * be __static_call_return0.
  */
+KVM_X86_OP(vendor_exit)
 KVM_X86_OP(check_processor_compatibility)
 KVM_X86_OP(hardware_enable)
 KVM_X86_OP(hardware_disable)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eda45a937666..e01d1aa3628c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1546,6 +1546,8 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
 struct kvm_x86_ops {
 	const char *name;
 
+	void (*vendor_exit)(void);
+
 	int (*check_processor_compatibility)(void);
 
 	int (*hardware_enable)(void);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index a0c551846b35..8f2ac7598912 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -565,7 +565,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi_kvm_vmx)
 {
 	exc_nmi(regs);
 }
-#if IS_MODULE(CONFIG_KVM_INTEL)
+#if IS_MODULE(CONFIG_KVM) && IS_ENABLED(CONFIG_KVM_INTEL)
 EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
 #endif
 #endif
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8c5fb7f57b4c..adfa57d59643 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -75,15 +75,12 @@ config KVM_WERROR
 	  If in doubt, say "N".
 
 config KVM_INTEL
-	tristate "KVM for Intel (and compatible) processors support"
+	bool "KVM for Intel (and compatible) processors support"
 	depends on KVM && IA32_FEAT_CTL
 	help
 	  Provides support for KVM on processors equipped with Intel's VT
 	  extensions, a.k.a. Virtual Machine Extensions (VMX).
 
-	  To compile this as a module, choose M here: the module
-	  will be called kvm-intel.
-
 config X86_SGX_KVM
 	bool "Software Guard eXtensions (SGX) Virtualization"
 	depends on X86_SGX && KVM_INTEL
@@ -97,20 +94,17 @@ config X86_SGX_KVM
 	  If unsure, say N.
 
 config KVM_AMD
-	tristate "KVM for AMD processors support"
+	bool "KVM for AMD processors support"
 	depends on KVM && (CPU_SUP_AMD || CPU_SUP_HYGON)
 	help
 	  Provides support for KVM on AMD processors equipped with the AMD-V
 	  (SVM) extensions.
 
-	  To compile this as a module, choose M here: the module
-	  will be called kvm-amd.
-
 config KVM_AMD_SEV
 	def_bool y
 	bool "AMD Secure Encrypted Virtualization (SEV) support"
 	depends on KVM_AMD && X86_64
-	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
+	depends on CRYPTO_DEV_SP_PSP && !(KVM=y && CRYPTO_DEV_CCP_DD=m)
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index d13f1a7b7b3d..3e965c90e065 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -21,20 +21,18 @@ kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
 kvm-$(CONFIG_KVM_XEN) += xen.o
 kvm-$(CONFIG_KVM_SMM) += smm.o
 
-kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
+kvm-$(CONFIG_KVM_INTEL) += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 	       vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
-kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
+kvm-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
 
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
+kvm-$(CONFIG_KVM_AMD) += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
 	     svm/sev.o svm/hyperv.o
 
 ifdef CONFIG_HYPERV
-kvm-amd-y += svm/svm_onhyperv.o
+kvm-$(CONFIG_KVM_AMD) += svm/svm_onhyperv.o
 endif
 
 obj-$(CONFIG_KVM) += kvm.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
-obj-$(CONFIG_KVM_AMD) += kvm-amd.o
 
 AFLAGS_svm/vmenter.o := -iquote $(obj)
 $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3d44e42f4f22..7fe9d11db8a6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -52,9 +52,6 @@
 #include "kvm_onhyperv.h"
 #include "svm_onhyperv.h"
 
-MODULE_AUTHOR("Qumranet");
-MODULE_LICENSE("GPL");
-
 #ifdef MODULE
 static const struct x86_cpu_id svm_cpu_id[] = {
 	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
@@ -551,7 +548,7 @@ static bool __kvm_is_svm_supported(void)
 	return true;
 }
 
-static bool kvm_is_svm_supported(void)
+bool kvm_is_svm_supported(void)
 {
 	bool supported;
 
@@ -4873,9 +4870,21 @@ static int svm_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+static void __svm_exit(void)
+{
+	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+}
+
+void svm_module_exit(void)
+{
+	__svm_exit();
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
+	.vendor_exit = svm_module_exit,
+
 	.check_processor_compatibility = svm_check_processor_compat,
 
 	.hardware_unsetup = svm_hardware_unsetup,
@@ -5298,22 +5307,12 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 	.pmu_ops = &amd_pmu_ops,
 };
 
-static void __svm_exit(void)
-{
-	kvm_x86_vendor_exit();
-
-	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
-}
-
-static int __init svm_init(void)
+int __init svm_init(void)
 {
 	int r;
 
 	__unused_size_checks();
 
-	if (!kvm_is_svm_supported())
-		return -EOPNOTSUPP;
-
 	r = kvm_x86_vendor_init(&svm_init_ops);
 	if (r)
 		return r;
@@ -5335,12 +5334,3 @@ static int __init svm_init(void)
 	__svm_exit();
 	return r;
 }
-
-static void __exit svm_exit(void)
-{
-	kvm_exit();
-	__svm_exit();
-}
-
-module_init(svm_init)
-module_exit(svm_exit)
diff --git
 a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 65d59de3cc63..629e662b131e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -66,9 +66,6 @@
 #include "x86.h"
 #include "smm.h"
 
-MODULE_AUTHOR("Qumranet");
-MODULE_LICENSE("GPL");
-
 #ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
 	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
@@ -2738,7 +2735,7 @@ static bool __kvm_is_vmx_supported(void)
 	return true;
 }
 
-static bool kvm_is_vmx_supported(void)
+bool kvm_is_vmx_supported(void)
 {
 	bool supported;
 
@@ -8199,9 +8196,35 @@ static void vmx_vm_destroy(struct kvm *kvm)
 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
 }
 
+static void vmx_cleanup_l1d_flush(void)
+{
+	if (vmx_l1d_flush_pages) {
+		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+		vmx_l1d_flush_pages = NULL;
+	}
+	/* Restore state so sysfs ignores VMX */
+	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+}
+
+static void __vmx_exit(void)
+{
+	allow_smaller_maxphyaddr = false;
+
+	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+
+	vmx_cleanup_l1d_flush();
+}
+
+void vmx_module_exit(void)
+{
+	__vmx_exit();
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
+	.vendor_exit = vmx_module_exit,
+
 	.check_processor_compatibility = vmx_check_processor_compat,
 
 	.hardware_unsetup = vmx_hardware_unsetup,
@@ -8608,41 +8631,10 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 	.pmu_ops = &intel_pmu_ops,
 };
 
-static void vmx_cleanup_l1d_flush(void)
-{
-	if (vmx_l1d_flush_pages) {
-		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
-		vmx_l1d_flush_pages = NULL;
-	}
-	/* Restore state so sysfs ignores VMX */
-	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
-}
-
-static void __vmx_exit(void)
-{
-	allow_smaller_maxphyaddr = false;
-
-	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
-
-	vmx_cleanup_l1d_flush();
-}
-
-static void vmx_exit(void)
-{
-	kvm_exit();
-	kvm_x86_vendor_exit();
-
-	__vmx_exit();
-}
-module_exit(vmx_exit);
-
-static int __init vmx_init(void)
+int __init vmx_init(void)
 {
 	int r, cpu;
 
-	if (!kvm_is_vmx_supported())
-		return -EOPNOTSUPP;
-
 	/*
 	 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
 	 * to unwind if a later step fails.
@@ -8691,6 +8683,7 @@ static int __init vmx_init(void)
 	if (r)
 		goto err_kvm_init;
+
 	return 0;
 
 err_kvm_init:
@@ -8699,4 +8692,3 @@ static int __init vmx_init(void)
 	kvm_x86_vendor_exit();
 	return r;
 }
-module_init(vmx_init);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b7f89fd2d47..e62daa2c3017 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -15,6 +15,7 @@
  * Amit Shah
  * Ben-Ami Yassour
  */
+#include
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include
@@ -13678,15 +13679,22 @@ static int __init kvm_x86_init(void)
 {
 	kvm_mmu_x86_module_init();
 	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
-	return 0;
+
+	if (kvm_is_svm_supported())
+		return svm_init();
+	else if (kvm_is_vmx_supported())
+		return vmx_init();
+
+	pr_err_ratelimited("kvm: no hardware support for SVM or VMX\n");
+	return -EOPNOTSUPP;
 }
 module_init(kvm_x86_init);
 
 static void __exit kvm_x86_exit(void)
 {
-	/*
-	 * If module_init() is implemented, module_exit() must also be
-	 * implemented to allow module unload.
-	 */
+	kvm_exit();
+	kvm_x86_vendor_exit();
+	kvm_x86_ops.vendor_exit();
+
 }
 module_exit(kvm_x86_exit);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6b5490319d1b..322be05e6c5b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -9,6 +9,21 @@
 #include "kvm_cache_regs.h"
 #include "kvm_emulate.h"
 
+#ifdef CONFIG_KVM_AMD
+bool kvm_is_svm_supported(void);
+int __init svm_init(void);
+void svm_module_exit(void);
+#else
+bool kvm_is_svm_supported(void) { return false; }
+#endif
+#ifdef CONFIG_KVM_INTEL
+bool kvm_is_vmx_supported(void);
+int __init vmx_init(void);
+void vmx_module_exit(void);
+#else
+bool kvm_is_vmx_supported(void) { return false; }
+#endif
+
 struct kvm_caps {
 	/* control of guest tsc rate supported? */
 	bool has_tsc_control;
-- 
2.42.0.869.gea05f2083d-goog
From: Anish Ghulati
Date: Tue, 7 Nov 2023 20:19:51 +0000
Subject: [RFC PATCH 03/14] KVM: x86: Remove unused exports
Message-ID: <20231107202002.667900-4-aghulati@google.com>
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland
Cc: Anish Ghulati

Remove all the unused exports from KVM now that the vendor modules no
longer exist.

Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/cpuid.c        |   7 --
 arch/x86/kvm/hyperv.c       |   2 -
 arch/x86/kvm/irq.c          |   3 -
 arch/x86/kvm/irq_comm.c     |   2 -
 arch/x86/kvm/kvm_onhyperv.c |   3 -
 arch/x86/kvm/lapic.c        |  15 ----
 arch/x86/kvm/mmu/mmu.c      |  12 ----
 arch/x86/kvm/mmu/spte.c     |   4 --
 arch/x86/kvm/mtrr.c         |   1 -
 arch/x86/kvm/pmu.c          |   2 -
 arch/x86/kvm/x86.c          | 140 ------------------------------
 11 files changed, 191 deletions(-)
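
For context, a sketch of why the removals are safe (illustrative only, not
part of the diff below; the stand-in value and file layout are made up):
EXPORT_SYMBOL_GPL() only affects symbol resolution across loadable-module
boundaries. Once the vmx/svm objects are linked into the same kvm.ko,
ordinary C linkage through a shared header is all that is required.

  #define NR_KVM_CPU_CAPS 8	/* stand-in; the real value lives in KVM headers */

  /* cpuid.c, built into kvm.ko: the definition stays... */
  u32 kvm_cpu_caps[NR_KVM_CPU_CAPS];
  /* ...but EXPORT_SYMBOL_GPL(kvm_cpu_caps) is dropped: nothing outside
   * kvm.ko resolves the symbol anymore. */

  /* Shared header: a plain declaration serves vmx.c and svm.c, which are
   * now compiled into the same module. Had vmx.c still lived in a separate
   * kvm-intel.ko, loading it without the export would fail with
   * "Unknown symbol kvm_cpu_caps". */
  extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS];
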
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0544e30b4946..01de1f659beb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -34,7 +34,6 @@
  * aligned to sizeof(unsigned long) because it's not accessed via bitops.
  */
 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_cpu_caps);
 
 u32 xstate_required_size(u64 xstate_bv, bool compacted)
 {
@@ -310,7 +309,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
 	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 }
-EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 
 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
 {
@@ -808,7 +806,6 @@ void kvm_set_cpu_caps(void)
 		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
 
 struct kvm_cpuid_array {
 	struct kvm_cpuid_entry2 *entries;
@@ -1432,7 +1429,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
 	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
 				 function, index);
 }
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
 
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function)
@@ -1440,7 +1436,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
 				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 }
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 /*
  * Intel CPUID semantics treats any query for an out-of-range leaf as if the
@@ -1560,7 +1555,6 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
 		       used_max_basic);
 	return exact;
 }
-EXPORT_SYMBOL_GPL(kvm_cpuid);
 
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -1578,4 +1572,3 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	kvm_rdx_write(vcpu, edx);
 	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 7c2dac6824e2..c093307dbfcb 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -917,7 +917,6 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 		return false;
 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 }
-EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
 
 int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
 {
@@ -929,7 +928,6 @@ int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
 				     &hv_vcpu->vp_assist_page,
 				     sizeof(struct hv_vp_assist_page));
 }
-EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
 
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index b2c397dd2bc6..88de44c8087e 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -89,7 +89,6 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 
 	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
 
 /*
  * check if there is pending interrupt without
@@ -102,7 +101,6 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 
 	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 
 /*
  * Read pending interrupt(from non-APIC source)
@@ -141,7 +139,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 
 	return kvm_get_apic_interrupt(v);	/* APIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 16d076a1b91a..fa12d7340844 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -120,7 +120,6 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 	irq->level = 1;
 	irq->shorthand = APIC_DEST_NOSHORT;
 }
-EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
 
 static inline bool kvm_msi_route_invalid(struct kvm *kvm,
 		struct kvm_kernel_irq_routing_entry *e)
@@ -356,7 +355,6 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
 
 	return r == 1;
 }
-EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
 
 #define IOAPIC_ROUTING_ENTRY(irq) \
 	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
index ded0bd688c65..aed3fdbd4b92 100644
--- a/arch/x86/kvm/kvm_onhyperv.c
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -101,13 +101,11 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
 
 	return __hv_flush_remote_tlbs_range(kvm, &range);
 }
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
 
 int hv_flush_remote_tlbs(struct kvm *kvm)
 {
 	return __hv_flush_remote_tlbs_range(kvm, NULL);
 }
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
 
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
@@ -121,4 +119,3 @@ void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 		spin_unlock(&kvm_arch->hv_root_tdp_lock);
 	}
 }
-EXPORT_SYMBOL_GPL(hv_track_root_tdp);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dcd60b39e794..1009ef21248d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -682,7 +682,6 @@ bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
 	return ((max_updated_irr != -1) &&
 		(max_updated_irr == *max_irr));
 }
-EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
 
 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 {
@@ -693,7 +692,6 @@ bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 		apic->irr_pending = true;
 	return irr_updated;
 }
-EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
 static inline int apic_search_irr(struct kvm_lapic *apic)
 {
@@ -736,7 +734,6 @@ void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
 {
 	apic_clear_irr(vec, vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
 
 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 {
@@ -811,7 +808,6 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 	 */
 	return apic_find_highest_irr(vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			     int vector, int level, int trig_mode,
@@ -973,7 +969,6 @@ void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
 {
 	apic_update_ppr(vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 {
@@ -1084,7 +1079,6 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		return false;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
 
 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
 			const unsigned long *bitmap, u32 bitmap_size)
@@ -1497,7 +1491,6 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
 	kvm_ioapic_send_eoi(apic, vector);
 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
 
 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 {
@@ -1522,7 +1515,6 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
 {
@@ -1638,7 +1630,6 @@ u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
 
 	return valid_reg_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
 
 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 			      void *data)
@@ -1872,7 +1863,6 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 	    lapic_timer_int_injected(vcpu))
 		__kvm_wait_lapic_expire(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
 
 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
 {
@@ -2185,7 +2175,6 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 out:
 	preempt_enable();
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
 
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
 {
@@ -2438,7 +2427,6 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 {
 	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 
 /* emulate APIC access in a trap manner */
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
@@ -2461,7 +2449,6 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 		kvm_lapic_reg_write(apic, offset, (u32)val);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
 
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
@@ -2627,7 +2614,6 @@ int kvm_alloc_apic_access_page(struct kvm *kvm)
 	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
 
 void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
 {
@@ -2858,7 +2844,6 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	__apic_update_ppr(apic, &ppr);
 	return apic_has_interrupt_for_ppr(apic, ppr);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e1d011c67cc6..e9e66d635688 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3610,7 +3610,6 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	write_unlock(&kvm->mmu_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 {
@@ -3637,7 +3636,6 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 
 	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
 
 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
 			    u8 level)
@@ -4441,7 +4439,6 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
 #ifdef CONFIG_X86_64
 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
@@ -4660,7 +4657,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 		__clear_sp_write_flooding_count(sp);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned int access)
@@ -5294,7 +5290,6 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
-EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
 
 static union kvm_cpu_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
@@ -5348,7 +5343,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 	kvm_mmu_new_pgd(vcpu, new_eptp);
 }
-EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
 			     union kvm_cpu_role cpu_role)
@@ -5413,7 +5407,6 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
 	else
 		init_kvm_softmmu(vcpu, cpu_role);
 }
-EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -5449,7 +5442,6 @@ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 	kvm_mmu_unload(vcpu);
 	kvm_init_mmu(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
@@ -5763,7 +5755,6 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
 				       insn_len);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				      u64 addr, hpa_t root_hpa)
@@ -5829,7 +5820,6 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
@@ -5846,7 +5836,6 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
 	++vcpu->stat.invlpg;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 
 
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
@@ -5899,7 +5888,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 	else
 		max_huge_page_level = PG_LEVEL_2M;
 }
-EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4a599130e9c9..feb3bbb16d70 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -22,7 +22,6 @@
 bool __read_mostly enable_mmio_caching = true;
 static bool __ro_after_init allow_mmio_caching;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_mmio_caching);
 
 u64 __read_mostly shadow_host_writable_mask;
 u64 __read_mostly shadow_mmu_writable_mask;
@@ -409,7 +408,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 	shadow_mmio_mask = mmio_mask;
 	shadow_mmio_access_mask = access_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
 {
@@ -420,7 +418,6 @@ void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
 	shadow_me_value = me_value;
 	shadow_me_mask = me_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
 
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 {
@@ -448,7 +445,6 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
 				   VMX_EPT_RWX_MASK, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
 
 void kvm_mmu_reset_all_pte_masks(void)
 {
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 3eb6e7f47e96..409225d19ac5 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -685,7 +685,6 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 	return type;
 }
-EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
 
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index edb89b51b383..f35511086046 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -27,7 +27,6 @@
 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 
 struct x86_pmu_capability __read_mostly kvm_pmu_cap;
-EXPORT_SYMBOL_GPL(kvm_pmu_cap);
 
 /* Precise Distribution of Instructions Retired (PDIR) */
 static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
@@ -773,7 +772,6 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 			kvm_pmu_incr_counter(pmc);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
 
 static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e62daa2c3017..0a8b94678928 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -95,7 +95,6 @@
 struct kvm_caps kvm_caps __read_mostly = {
 	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
 };
-EXPORT_SYMBOL_GPL(kvm_caps);
 
 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
 
@@ -149,7 +148,6 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 bool __read_mostly report_ignored_msrs = true;
 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
-EXPORT_SYMBOL_GPL(report_ignored_msrs);
 
 unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
@@ -175,18 +173,15 @@ module_param(vector_hashing, bool, S_IRUGO);
 
 bool __read_mostly enable_vmware_backdoor = false;
 module_param(enable_vmware_backdoor, bool, S_IRUGO);
-EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
 
 /*
  * If nested=1, nested virtualization is supported
 */
 bool __read_mostly nested = 1;
 module_param(nested, bool, S_IRUGO);
-EXPORT_SYMBOL_GPL(nested);
 
 bool __read_mostly enable_vnmi = 1;
 module_param(enable_vnmi, bool, S_IRUGO);
-EXPORT_SYMBOL_GPL(enable_vnmi);
 
 /*
  * Flags to manipulate forced emulation behavior (any non-zero value will
@@ -201,7 +196,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
 
 /* Enable/disable PMU virtualization */
 bool __read_mostly enable_pmu = true;
-EXPORT_SYMBOL_GPL(enable_pmu);
 module_param(enable_pmu, bool, 0444);
 
 bool __read_mostly eager_page_split = true;
@@ -228,7 +222,6 @@ struct kvm_user_return_msrs {
 };
 
 u32 __read_mostly kvm_nr_uret_msrs;
-EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
 static struct kvm_user_return_msrs __percpu *user_return_msrs;
 
@@ -238,19 +231,14 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
 				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
 
 u64 __read_mostly host_efer;
-EXPORT_SYMBOL_GPL(host_efer);
 
 bool __read_mostly allow_smaller_maxphyaddr = 0;
-EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 bool __read_mostly enable_apicv = true;
-EXPORT_SYMBOL_GPL(enable_apicv);
 
 u64 __read_mostly host_xss;
-EXPORT_SYMBOL_GPL(host_xss);
 
 u64 __read_mostly host_arch_capabilities;
-EXPORT_SYMBOL_GPL(host_arch_capabilities);
 
 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 	KVM_GENERIC_VM_STATS(),
@@ -422,7 +410,6 @@ int kvm_add_user_return_msr(u32 msr)
 	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
 	return kvm_nr_uret_msrs++;
 }
-EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
 
 int kvm_find_user_return_msr(u32 msr)
 {
@@ -434,7 +421,6 @@ int kvm_find_user_return_msr(u32 msr)
 	}
 	return -1;
 }
-EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
 
 static void kvm_user_return_msr_cpu_online(void)
 {
@@ -471,7 +457,6 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
 
 static void drop_user_return_notifiers(void)
 {
@@ -491,7 +476,6 @@ enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
 {
 	return kvm_apic_mode(kvm_get_apic_base(vcpu));
 }
-EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
 
 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
@@ -526,7 +510,6 @@ noinstr void kvm_spurious_fault(void)
 	/* Fault while not rebooting.  We want the trace. */
 	BUG_ON(!kvm_rebooting);
 }
-EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
 #define EXCPT_BENIGN		0
 #define EXCPT_CONTRIBUTORY	1
@@ -631,7 +614,6 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
 	ex->has_payload = false;
 	ex->payload = 0;
 }
-EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
 
 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
 				       bool has_error_code, u32 error_code,
@@ -743,20 +725,17 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
 }
-EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
 			   unsigned long payload)
 {
 	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
 
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
 				    u32 error_code, unsigned long payload)
@@ -774,7 +753,6 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 
 	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
@@ -824,7 +802,6 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 
 	fault_mmu->inject_page_fault(vcpu, fault);
 }
-EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
@@ -836,13 +813,11 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
 }
-EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
 
 /*
  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
@@ -864,7 +839,6 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 	kvm_queue_exception(vcpu, UD_VECTOR);
 	return false;
 }
-EXPORT_SYMBOL_GPL(kvm_require_dr);
 
 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
 {
@@ -919,7 +893,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	return 1;
 }
-EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
@@ -977,7 +950,6 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
 }
-EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
@@ -1018,13 +990,11 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
 }
-EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 {
@@ -1047,7 +1017,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
 		write_pkru(vcpu->arch.pkru);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
@@ -1073,7 +1042,6 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 	}
 
 }
-EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
 
 #ifdef CONFIG_X86_64
 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
@@ -1138,7 +1106,6 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 
 	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
 
 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1150,7 +1117,6 @@ bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
 
 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1198,7 +1164,6 @@ void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned lon
 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 
 }
-EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1229,7 +1194,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
 {
@@ -1321,7 +1285,6 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
@@ -1333,7 +1296,6 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 	vcpu->arch.cr8 = cr8;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
@@ -1342,7 +1304,6 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 	else
 		return vcpu->arch.cr8;
 }
-EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
 {
@@ -1367,7 +1328,6 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu)
 	if (dr7 & DR7_BP_EN_MASK)
 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
-EXPORT_SYMBOL_GPL(kvm_update_dr7);
 
 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
@@ -1408,7 +1368,6 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_dr);
 
 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 {
@@ -1428,7 +1387,6 @@ void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 		break;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_get_dr);
 
 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
 {
@@ -1444,7 +1402,6 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
 	kvm_rdx_write(vcpu, data >> 32);
 	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
 
 /*
  * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
@@ -1758,7 +1715,6 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	return __kvm_valid_efer(vcpu, efer);
 }
-EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
@@ -1797,7 +1753,6 @@ void kvm_enable_efer_bits(u64 mask)
 {
 	efer_reserved_bits &= ~mask;
 }
-EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 {
@@ -1840,7 +1795,6 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 
 	return allowed;
 }
-EXPORT_SYMBOL_GPL(kvm_msr_allowed);
 
 /*
  * Write @data into the MSR specified by @index.  Select MSR specific fault
@@ -1988,13 +1942,11 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
 	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
 	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
 {
@@ -2083,7 +2035,6 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 
 	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
 
 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 {
@@ -2108,7 +2059,6 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 
 	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
 {
@@ -2120,14 +2070,12 @@ int kvm_emulate_invd(struct kvm_vcpu *vcpu)
 	/* Treat an INVD instruction as a NOP and just skip it. */
 	return kvm_emulate_as_nop(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_invd);
 
 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
 {
 	kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
 
 
 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
@@ -2143,13 +2091,11 @@ int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
 {
 	return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
 
 int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
 {
 	return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
 
 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
@@ -2222,7 +2168,6 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention
@@ -2593,7 +2538,6 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 	return vcpu->arch.l1_tsc_offset +
 		kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 }
-EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
 {
@@ -2608,7 +2552,6 @@ u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
 	nested_offset += l2_offset;
 	return nested_offset;
 }
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
 
 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 {
@@ -2618,7 +2561,6 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 
 	return l1_multiplier;
 }
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
@@ -3525,7 +3467,6 @@ void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
 		kvm_vcpu_flush_tlb_guest(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
@@ -4005,7 +3946,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr_common);
 
 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
@@ -4363,7 +4303,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr_common);
 
 /*
  * Read or write a bunch of msrs. All parameters are kernel addresses.
@@ -7314,7 +7253,6 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 			       struct x86_exception *exception)
@@ -7325,7 +7263,6 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 	access |= PFERR_WRITE_MASK;
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
 
 /* uses this to access any guest's mapped memory without checking CPL */
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
@@ -7411,7 +7348,6 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 					  exception);
 }
-EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
 
 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
 			     gva_t addr, void *val, unsigned int bytes,
@@ -7483,7 +7419,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
 				void *insn, int insn_len)
@@ -7515,7 +7450,6 @@ int handle_ud(struct kvm_vcpu *vcpu)
 
 	return kvm_emulate_instruction(vcpu, emul_type);
 }
-EXPORT_SYMBOL_GPL(handle_ud);
 
 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 			    gpa_t gpa, bool write)
@@ -7985,7 +7919,6 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 	kvm_emulate_wbinvd_noskip(vcpu);
 	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
 
 
@@ -8460,7 +8393,6 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
 		kvm_set_rflags(vcpu, ctxt->eflags);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
 
 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
 					   u8 ndata, u8 *insn_bytes, u8 insn_size)
@@ -8526,13 +8458,11 @@ void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
 {
 	prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);
 
 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
 {
 	__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
 {
@@ -8740,7 +8670,6 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		r = kvm_vcpu_do_singlestep(vcpu);
 	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
 
 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
 {
@@ -8873,7 +8802,6 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
 
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len)
@@ -9061,14 +8989,12 @@ int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
 {
 	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
 
 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 					void *insn, int insn_len)
 {
 	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
 {
@@ -9163,7 +9089,6 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
 		ret = kvm_fast_pio_out(vcpu, size, port);
 	return ret && kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_fast_pio);
 
 static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
@@ -9591,7 +9516,6 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
 
 void kvm_x86_vendor_exit(void)
 {
@@ -9625,7 +9549,6 @@ void kvm_x86_vendor_exit(void)
 	kvm_x86_ops.hardware_enable = NULL;
 	mutex_unlock(&vendor_module_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
 
 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
@@ -9650,7 +9573,6 @@ int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
 {
 	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
@@ -9661,7 +9583,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	 */
 	return kvm_emulate_halt_noskip(vcpu) && ret;
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
 {
@@ -9670,7 +9591,6 @@ int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
 	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
 					KVM_EXIT_AP_RESET_HOLD) && ret;
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
 
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
@@ -9734,7 +9654,6 @@ bool kvm_apicv_activated(struct kvm *kvm)
 {
 	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
 {
@@ -9743,7 +9662,6 @@ bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
 
 	return (vm_reasons | vcpu_reasons) == 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
 
 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
 				       enum kvm_apicv_inhibit reason, bool set)
@@ -9917,7 +9835,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	++vcpu->stat.hypercalls;
 	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 {
@@ -10355,7 +10272,6 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 	preempt_enable();
 	up_read(&vcpu->kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);
 
 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 {
@@ -10431,7 +10347,6 @@ void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
 	__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
 	up_write(&kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
@@ -10490,7 +10405,6 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
 {
 	smp_send_reschedule(vcpu->cpu);
 }
-EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
 
 /*
 * Called within kvm->srcu read side.
@@ -11467,7 +11381,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 	kvm_set_rflags(vcpu, ctxt->eflags);
 	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
@@ -12159,7 +12072,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (init_event)
 		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
 
 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 {
@@ -12171,7 +12083,6 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
 	kvm_rip_write(vcpu, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
 
 int kvm_arch_hardware_enable(void)
 {
@@ -12286,7 +12197,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 }
 
 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
-EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
 
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -12475,7 +12385,6 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 
 	return (void __user *)hva;
 }
-EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
@@ -12939,13 +12848,11 @@ unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
 	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
 		     kvm_rip_read(vcpu));
 }
-EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
 {
 	return kvm_get_linear_rip(vcpu) == linear_rip;
 }
-EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 {
@@ -12956,7 +12863,6 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 		rflags &= ~X86_EFLAGS_TF;
 	return rflags;
 }
-EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
@@ -12971,7 +12877,6 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	__kvm_set_rflags(vcpu, rflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
@@ -13188,37 +13093,31 @@ void kvm_arch_start_assignment(struct kvm *kvm)
 	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
 		static_call_cond(kvm_x86_pi_start_assignment)(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
 
 void kvm_arch_end_assignment(struct kvm *kvm)
 {
 	atomic_dec(&kvm->arch.assigned_device_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
 
 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
 	return raw_atomic_read(&kvm->arch.assigned_device_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
 
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
 	atomic_inc(&kvm->arch.noncoherent_dma_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
 
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
 {
 	atomic_dec(&kvm->arch.noncoherent_dma_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
 
 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 {
 	return atomic_read(&kvm->arch.noncoherent_dma_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
 
 bool kvm_arch_has_irq_bypass(void)
 {
@@ -13291,8 +13190,6 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 {
 	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
 }
-EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
-
 
 int kvm_spec_ctrl_test_value(u64 value)
 {
@@ -13318,7 +13215,6 @@
=20 return ret; } -EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); =20 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 e= rror_code) { @@ -13343,7 +13239,6 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu = *vcpu, gva_t gva, u16 error_c } vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); } -EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); =20 /* * Handles kvm_read/write_guest_virt*() result and either injects #PF or r= eturns @@ -13372,7 +13267,6 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu= , int r, =20 return 0; } -EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); =20 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gv= a) { @@ -13432,7 +13326,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsig= ned long type, gva_t gva) return 1; } } -EXPORT_SYMBOL_GPL(kvm_handle_invpcid); =20 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) { @@ -13517,7 +13410,6 @@ int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gp= a_t gpa, unsigned int bytes, =20 return 0; } -EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); =20 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int by= tes, void *data) @@ -13555,7 +13447,6 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa= _t gpa, unsigned int bytes, =20 return 0; } -EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); =20 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned co= unt, int size) { @@ -13643,37 +13534,6 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, un= signed int size, return in ? kvm_sev_es_ins(vcpu, size, port) : kvm_sev_es_outs(vcpu, size, port); } -EXPORT_SYMBOL_GPL(kvm_sev_es_string_io); - -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); =20 static int __init kvm_x86_init(void) { --=20 2.42.0.869.gea05f2083d-goog From nobody Wed Dec 31 04:49:14 2025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C5DEFC4167D for ; Tue, 7 Nov 2023 20:20:38 +0000 (UTC) Received: 
(majordomo@vger.kernel.org) by vger.kernel.org via listexpand; Tue, 7 Nov 2023 15:20:38 -0500 Date: Tue, 7 Nov 2023 20:19:52 +0000 In-Reply-To: <20231107202002.667900-1-aghulati@google.com> References: <20231107202002.667900-1-aghulati@google.com> Message-ID: <20231107202002.667900-5-aghulati@google.com> Subject: [RFC PATCH 04/14] KVM: x86: Create stubs for a new VAC module From: Anish Ghulati To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland Cc: Anish Ghulati Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Create empty stub files for what will eventually be a new module named Virtualization Acceleration Code: Unupgradable Units Module (backronym: VACUUM, or VAC for short).
VAC will function as a base module for multiple KVM modules and will contain the code needed to manage system-wide virtualization resources, like enabling/disabling virtualization hardware. Signed-off-by: Anish Ghulati --- arch/x86/kvm/Makefile | 2 ++ arch/x86/kvm/svm/vac.c | 2 ++ arch/x86/kvm/vac.c | 3 +++ arch/x86/kvm/vac.h | 6 ++++++ arch/x86/kvm/vmx/vac.c | 2 ++ virt/kvm/Makefile.kvm | 1 + virt/kvm/vac.c | 3 +++ virt/kvm/vac.h | 6 ++++++ 8 files changed, 25 insertions(+) create mode 100644 arch/x86/kvm/svm/vac.c create mode 100644 arch/x86/kvm/vac.c create mode 100644 arch/x86/kvm/vac.h create mode 100644 arch/x86/kvm/vmx/vac.c create mode 100644 virt/kvm/vac.c create mode 100644 virt/kvm/vac.h diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 3e965c90e065..b3de4bd7988f 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -13,6 +13,8 @@ kvm-y +=3D x86.o emulate.o i8259.o irq.o lapic.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ mmu/spte.o =20 +kvm-y +=3D vac.o vmx/vac.o svm/vac.o + ifdef CONFIG_HYPERV kvm-y +=3D kvm_onhyperv.o endif diff --git a/arch/x86/kvm/svm/vac.c b/arch/x86/kvm/svm/vac.c new file mode 100644 index 000000000000..4aabf16d2fc0 --- /dev/null +++ b/arch/x86/kvm/svm/vac.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only + diff --git a/arch/x86/kvm/vac.c b/arch/x86/kvm/vac.c new file mode 100644 index 000000000000..18d2ae7d3e47 --- /dev/null +++ b/arch/x86/kvm/vac.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vac.h" diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h new file mode 100644 index 000000000000..4d5dc4700f4e --- /dev/null +++ b/arch/x86/kvm/vac.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef ARCH_X86_KVM_VAC_H +#define ARCH_X86_KVM_VAC_H + +#endif // ARCH_X86_KVM_VAC_H diff --git a/arch/x86/kvm/vmx/vac.c b/arch/x86/kvm/vmx/vac.c new file mode 100644 index 000000000000..4aabf16d2fc0 --- /dev/null +++ b/arch/x86/kvm/vmx/vac.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only + diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm index 4de10d447ef3..7876021ea4d7 100644 --- a/virt/kvm/Makefile.kvm +++ b/virt/kvm/Makefile.kvm @@ -11,6 +11,7 @@ kvm-y :=3D $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/bina= ry_stats.o ifdef CONFIG_VFIO kvm-y +=3D $(KVM)/vfio.o endif +kvm-y +=3D $(KVM)/vac.o kvm-$(CONFIG_KVM_MMIO) +=3D $(KVM)/coalesced_mmio.o kvm-$(CONFIG_KVM_ASYNC_PF) +=3D $(KVM)/async_pf.o kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) +=3D $(KVM)/irqchip.o diff --git a/virt/kvm/vac.c b/virt/kvm/vac.c new file mode 100644 index 000000000000..18d2ae7d3e47 --- /dev/null +++ b/virt/kvm/vac.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vac.h" diff --git a/virt/kvm/vac.h b/virt/kvm/vac.h new file mode 100644 index 000000000000..8f7123a916c5 --- /dev/null +++ b/virt/kvm/vac.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __KVM_VAC_H__ +#define __KVM_VAC_H__ + +#endif --=20 2.42.0.869.gea05f2083d-goog From nobody Wed Dec 31 04:49:14 2025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D4027C4167B for ; Tue, 7 Nov 2023 20:20:41 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235182AbjKGUUl (ORCPT ); Tue, 7 Nov 2023 15:20:41 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:45402 "EHLO 
lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232216AbjKGUUg (ORCPT ); Tue, 7 Nov 2023 15:20:36 -0500 Received: from mail-yw1-x114a.google.com (mail-yw1-x114a.google.com [IPv6:2607:f8b0:4864:20::114a]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 6BD4710D9 for ; Tue, 7 Nov 2023 12:20:23 -0800 (PST) Received: by mail-yw1-x114a.google.com with SMTP id 00721157ae682-5a7b9e83b70so700557b3.0 for ; Tue, 07 Nov 2023 12:20:23 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=20230601; t=1699388422; x=1699993222; darn=vger.kernel.org; h=cc:to:from:subject:message-id:references:mime-version:in-reply-to :date:from:to:cc:subject:date:message-id:reply-to; bh=G4+jn87rMIg4E1shvnY+9vzkKef1Ww/NQwvl7+a5xc4=; b=wgqasSSDGo/8NyyNhTXPx9THUPwouqoTyt+GBMHt8qnkygsq26SbK7332CtNE3BQ4Y DaCNbBepDl3fZ44NoMl9fahyPCSma/xVniwTjkK8AgYQnF7dHL5pAnyIyCPc8Qi0aiLC np6irPmA2NAwmI4vcnqmumZWutzXcOTGPBJP2M3/O/HvFBpGz5uZbIWhJgtI7co8Bhf9 hEEbMghPC5jxATZuF+G2u9HV1F5I4t7ga6LhnWtrgXNC3MEQQ7qmHZ2AdLGJV5nH0ZAl 2Wibu+c6a+ofKx+fgtjUGmm2QcWmOdE1WLr+2ht4pFUTf6MafeIwNVM1/4ucI581rTHb lMYA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20230601; t=1699388422; x=1699993222; h=cc:to:from:subject:message-id:references:mime-version:in-reply-to :date:x-gm-message-state:from:to:cc:subject:date:message-id:reply-to; bh=G4+jn87rMIg4E1shvnY+9vzkKef1Ww/NQwvl7+a5xc4=; b=O+31MEUhMW3lX2lCU81qBXUIDi/9oVBBw3TwWY2VhhsiHX2Fd47/YxvfL28TdErJ+v t0U9hXBHHWhLz0jPHLc5Cdgoe/uPKNxAZThAnvcfw4M1q+FVGhmI/PkqriVPG8mG/QpY gqGLE3zmoZdCkXVTiwsrs4Kuepel2bDnUxQgJZe1vr1eRdmudyl1Kcj8G5lQWJAPFmWs vnQ93IPMijeq14kjoCIm/dxp1wOD9DQ+L95Q+Pi5EopQcYrCpadUrQsmV+YaNMLzylQ2 hMi+zrhuIT+f4fiNt/hKUCIeEz9QhU9B48/3B1BS/7vY5AGBcUJQZG7C+Zc2LTFFNdpb RZDw== X-Gm-Message-State: AOJu0YxH/KQtZM8QVqS6IR4P+XCFIMueUp0QRrtVdWDS0YR4irSk0Ux2 mcKOcQzssQCvqTT34EfjkxHFqMo//DBypQ== X-Google-Smtp-Source: AGHT+IEsTO9DwkSwbHP7/Lon4nUEYrXwHVJx+yshQ+l/R2Tk3D7Q5HrFIbWNrmOaRRvb20od3YsHAt1MUiccYA== X-Received: from aghulati-dev.c.googlers.com ([fda3:e722:ac3:cc00:2b:ff92:c0a8:18bb]) (user=aghulati job=sendgmr) by 2002:a05:690c:88f:b0:5a7:d45d:1223 with SMTP id cd15-20020a05690c088f00b005a7d45d1223mr91894ywb.3.1699388422661; Tue, 07 Nov 2023 12:20:22 -0800 (PST) Date: Tue, 7 Nov 2023 20:19:53 +0000 In-Reply-To: <20231107202002.667900-1-aghulati@google.com> Mime-Version: 1.0 References: <20231107202002.667900-1-aghulati@google.com> X-Mailer: git-send-email 2.42.0.869.gea05f2083d-goog Message-ID: <20231107202002.667900-6-aghulati@google.com> Subject: [RFC PATCH 05/14] KVM: x86: Refactor hardware enable/disable operations into a new file From: Anish Ghulati To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson , Paolo Bonzini , Thomas Gleixner , Ingo Molnar , Borislav Petkov , Dave Hansen , x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov , peterz@infradead.org, paulmck@kernel.org, Mark Rutland Cc: Anish Ghulati Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Move KVM's hardware enabling to vac.{h,c} as a first step towards building VAC and all of the system-wide virtualization support as a separate module. Defer moving arch code to future patches to keep the diff reasonable. No functional change intended. 
Signed-off-by: Anish Ghulati --- virt/kvm/kvm_main.c | 197 +------------------------------------------- virt/kvm/vac.c | 177 +++++++++++++++++++++++++++++++++++++++ virt/kvm/vac.h | 26 ++++++ 3 files changed, 204 insertions(+), 196 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f585a159b4f5..fb50deaad3fd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -59,6 +59,7 @@ #include "coalesced_mmio.h" #include "async_pf.h" #include "kvm_mm.h" +#include "vac.h" #include "vfio.h" =20 #include @@ -140,8 +141,6 @@ static int kvm_no_compat_open(struct inode *inode, stru= ct file *file) #define KVM_COMPAT(c) .compat_ioctl =3D kvm_no_compat_ioctl, \ .open =3D kvm_no_compat_open #endif -static int hardware_enable_all(void); -static void hardware_disable_all(void); =20 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); =20 @@ -5167,200 +5166,6 @@ static struct miscdevice kvm_dev =3D { &kvm_chardev_ops, }; =20 -#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING -__visible bool kvm_rebooting; -EXPORT_SYMBOL_GPL(kvm_rebooting); - -static DEFINE_PER_CPU(bool, hardware_enabled); -static int kvm_usage_count; - -static int __hardware_enable_nolock(void) -{ - if (__this_cpu_read(hardware_enabled)) - return 0; - - if (kvm_arch_hardware_enable()) { - pr_info("kvm: enabling virtualization on CPU%d failed\n", - raw_smp_processor_id()); - return -EIO; - } - - __this_cpu_write(hardware_enabled, true); - return 0; -} - -static void hardware_enable_nolock(void *failed) -{ - if (__hardware_enable_nolock()) - atomic_inc(failed); -} - -static int kvm_online_cpu(unsigned int cpu) -{ - int ret =3D 0; - - /* - * Abort the CPU online process if hardware virtualization cannot - * be enabled. Otherwise running VMs would encounter unrecoverable - * errors when scheduled to this CPU. - */ - mutex_lock(&kvm_lock); - if (kvm_usage_count) - ret =3D __hardware_enable_nolock(); - mutex_unlock(&kvm_lock); - return ret; -} - -static void hardware_disable_nolock(void *junk) -{ - /* - * Note, hardware_disable_all_nolock() tells all online CPUs to disable - * hardware, not just CPUs that successfully enabled hardware! - */ - if (!__this_cpu_read(hardware_enabled)) - return; - - kvm_arch_hardware_disable(); - - __this_cpu_write(hardware_enabled, false); -} - -static int kvm_offline_cpu(unsigned int cpu) -{ - mutex_lock(&kvm_lock); - if (kvm_usage_count) - hardware_disable_nolock(NULL); - mutex_unlock(&kvm_lock); - return 0; -} - -static void hardware_disable_all_nolock(void) -{ - BUG_ON(!kvm_usage_count); - - kvm_usage_count--; - if (!kvm_usage_count) - on_each_cpu(hardware_disable_nolock, NULL, 1); -} - -static void hardware_disable_all(void) -{ - cpus_read_lock(); - mutex_lock(&kvm_lock); - hardware_disable_all_nolock(); - mutex_unlock(&kvm_lock); - cpus_read_unlock(); -} - -static int hardware_enable_all(void) -{ - atomic_t failed =3D ATOMIC_INIT(0); - int r; - - /* - * Do not enable hardware virtualization if the system is going down. - * If userspace initiated a forced reboot, e.g. reboot -f, then it's - * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling - * after kvm_reboot() is called. Note, this relies on system_state - * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops - * hook instead of registering a dedicated reboot notifier (the latter - * runs before system_state is updated). 
- */ - if (system_state =3D=3D SYSTEM_HALT || system_state =3D=3D SYSTEM_POWER_O= FF || - system_state =3D=3D SYSTEM_RESTART) - return -EBUSY; - - /* - * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu() - * is called, and so on_each_cpu() between them includes the CPU that - * is being onlined. As a result, hardware_enable_nolock() may get - * invoked before kvm_online_cpu(), which also enables hardware if the - * usage count is non-zero. Disable CPU hotplug to avoid attempting to - * enable hardware multiple times. - */ - cpus_read_lock(); - mutex_lock(&kvm_lock); - - r =3D 0; - - kvm_usage_count++; - if (kvm_usage_count =3D=3D 1) { - on_each_cpu(hardware_enable_nolock, &failed, 1); - - if (atomic_read(&failed)) { - hardware_disable_all_nolock(); - r =3D -EBUSY; - } - } - - mutex_unlock(&kvm_lock); - cpus_read_unlock(); - - return r; -} - -static void kvm_shutdown(void) -{ - /* - * Disable hardware virtualization and set kvm_rebooting to indicate - * that KVM has asynchronously disabled hardware virtualization, i.e. - * that relevant errors and exceptions aren't entirely unexpected. - * Some flavors of hardware virtualization need to be disabled before - * transferring control to firmware (to perform shutdown/reboot), e.g. - * on x86, virtualization can block INIT interrupts, which are used by - * firmware to pull APs back under firmware control. Note, this path - * is used for both shutdown and reboot scenarios, i.e. neither name is - * 100% comprehensive. - */ - pr_info("kvm: exiting hardware virtualization\n"); - kvm_rebooting =3D true; - on_each_cpu(hardware_disable_nolock, NULL, 1); -} - -static int kvm_suspend(void) -{ - /* - * Secondary CPUs and CPU hotplug are disabled across the suspend/resume - * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count - * is stable. Assert that kvm_lock is not held to ensure the system - * isn't suspended while KVM is enabling hardware. Hardware enabling - * can be preempted, but the task cannot be frozen until it has dropped - * all locks (userspace tasks are frozen via a fake signal). 
- */ - lockdep_assert_not_held(&kvm_lock); - lockdep_assert_irqs_disabled(); - - if (kvm_usage_count) - hardware_disable_nolock(NULL); - return 0; -} - -static void kvm_resume(void) -{ - lockdep_assert_not_held(&kvm_lock); - lockdep_assert_irqs_disabled(); - - if (kvm_usage_count) - WARN_ON_ONCE(__hardware_enable_nolock()); -} - -static struct syscore_ops kvm_syscore_ops =3D { - .suspend =3D kvm_suspend, - .resume =3D kvm_resume, - .shutdown =3D kvm_shutdown, -}; -#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ -static int hardware_enable_all(void) -{ - return 0; -} - -static void hardware_disable_all(void) -{ - -} -#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ - static void kvm_iodevice_destructor(struct kvm_io_device *dev) { if (dev->ops->destructor) diff --git a/virt/kvm/vac.c b/virt/kvm/vac.c index 18d2ae7d3e47..ff034a53af50 100644 --- a/virt/kvm/vac.c +++ b/virt/kvm/vac.c @@ -1,3 +1,180 @@ // SPDX-License-Identifier: GPL-2.0-only =20 #include "vac.h" + +#include +#include +#include + +#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING +DEFINE_MUTEX(vac_lock); + +__visible bool kvm_rebooting; +EXPORT_SYMBOL_GPL(kvm_rebooting); + +static DEFINE_PER_CPU(bool, hardware_enabled); +static int kvm_usage_count; + +static int __hardware_enable_nolock(void) +{ + if (__this_cpu_read(hardware_enabled)) + return 0; + + if (kvm_arch_hardware_enable()) { + pr_info("kvm: enabling virtualization on CPU%d failed\n", + raw_smp_processor_id()); + return -EIO; + } + + __this_cpu_write(hardware_enabled, true); + return 0; +} + +static void hardware_enable_nolock(void *failed) +{ + if (__hardware_enable_nolock()) + atomic_inc(failed); +} + +int kvm_online_cpu(unsigned int cpu) +{ + int ret =3D 0; + + /* + * Abort the CPU online process if hardware virtualization cannot + * be enabled. Otherwise running VMs would encounter unrecoverable + * errors when scheduled to this CPU. + */ + mutex_lock(&vac_lock); + if (kvm_usage_count) + ret =3D __hardware_enable_nolock(); + mutex_unlock(&vac_lock); + return ret; +} + +static void hardware_disable_nolock(void *junk) +{ + /* + * Note, hardware_disable_all_nolock() tells all online CPUs to disable + * hardware, not just CPUs that successfully enabled hardware! + */ + if (!__this_cpu_read(hardware_enabled)) + return; + + kvm_arch_hardware_disable(); + + __this_cpu_write(hardware_enabled, false); +} + +int kvm_offline_cpu(unsigned int cpu) +{ + mutex_lock(&vac_lock); + if (kvm_usage_count) + hardware_disable_nolock(NULL); + mutex_unlock(&vac_lock); + return 0; +} + +static void hardware_disable_all_nolock(void) +{ + BUG_ON(!kvm_usage_count); + + kvm_usage_count--; + if (!kvm_usage_count) + on_each_cpu(hardware_disable_nolock, NULL, 1); +} + +void hardware_disable_all(void) +{ + cpus_read_lock(); + mutex_lock(&vac_lock); + hardware_disable_all_nolock(); + mutex_unlock(&vac_lock); + cpus_read_unlock(); +} + +int hardware_enable_all(void) +{ + atomic_t failed =3D ATOMIC_INIT(0); + int r =3D 0; + + /* + * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu() + * is called, and so on_each_cpu() between them includes the CPU that + * is being onlined. As a result, hardware_enable_nolock() may get + * invoked before kvm_online_cpu(), which also enables hardware if the + * usage count is non-zero. Disable CPU hotplug to avoid attempting to + * enable hardware multiple times. 
+ */ + cpus_read_lock(); + mutex_lock(&vac_lock); + + kvm_usage_count++; + if (kvm_usage_count =3D=3D 1) { + on_each_cpu(hardware_enable_nolock, &failed, 1); + + if (atomic_read(&failed)) { + hardware_disable_all_nolock(); + r =3D -EBUSY; + } + } + + mutex_unlock(&vac_lock); + cpus_read_unlock(); + + return r; +} + +static int kvm_reboot(struct notifier_block *notifier, unsigned long val, + void *v) +{ + /* + * Some (well, at least mine) BIOSes hang on reboot if + * in vmx root mode. + * + * And Intel TXT required VMX off for all cpu when system shutdown. + */ + pr_info("kvm: exiting hardware virtualization\n"); + kvm_rebooting =3D true; + on_each_cpu(hardware_disable_nolock, NULL, 1); + return NOTIFY_OK; +} + +static int kvm_suspend(void) +{ + /* + * Secondary CPUs and CPU hotplug are disabled across the suspend/resume + * callbacks, i.e. no need to acquire vac_lock to ensure the usage count + * is stable. Assert that vac_lock is not held to ensure the system + * isn't suspended while KVM is enabling hardware. Hardware enabling + * can be preempted, but the task cannot be frozen until it has dropped + * all locks (userspace tasks are frozen via a fake signal). + */ + lockdep_assert_not_held(&vac_lock); + lockdep_assert_irqs_disabled(); + + if (kvm_usage_count) + hardware_disable_nolock(NULL); + return 0; +} + +static void kvm_resume(void) +{ + lockdep_assert_not_held(&vac_lock); + lockdep_assert_irqs_disabled(); + + if (kvm_usage_count) + WARN_ON_ONCE(__hardware_enable_nolock()); +} + +struct notifier_block kvm_reboot_notifier =3D { + .notifier_call =3D kvm_reboot, + .priority =3D 0, +}; + +struct syscore_ops kvm_syscore_ops =3D { + .suspend =3D kvm_suspend, + .resume =3D kvm_resume, +}; + +#endif diff --git a/virt/kvm/vac.h b/virt/kvm/vac.h index 8f7123a916c5..aed178a16bdb 100644 --- a/virt/kvm/vac.h +++ b/virt/kvm/vac.h @@ -3,4 +3,30 @@ #ifndef __KVM_VAC_H__ #define __KVM_VAC_H__ =20 +#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING + +#include +#include + +int kvm_online_cpu(unsigned int cpu); +int kvm_offline_cpu(unsigned int cpu); +void hardware_disable_all(void); +int hardware_enable_all(void); + +extern struct notifier_block kvm_reboot_notifier; + +extern struct syscore_ops kvm_syscore_ops; + +#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ +static inline int hardware_enable_all(void) +{ + return 0; +} + +static inline void hardware_disable_all(void) +{ + +} +#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ + #endif --=20 2.42.0.869.gea05f2083d-goog From nobody Wed Dec 31 04:49:14 2025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 06931C4332F for ; Tue, 7 Nov 2023 20:20:55 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235349AbjKGUUz (ORCPT ); Tue, 7 Nov 2023 15:20:55 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34302 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1343713AbjKGUUi (ORCPT ); Tue, 7 Nov 2023 15:20:38 -0500 Received: from mail-yb1-xb4a.google.com (mail-yb1-xb4a.google.com [IPv6:2607:f8b0:4864:20::b4a]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E37A810F1 for ; Tue, 7 Nov 2023 12:20:25 -0800 (PST) Received: by mail-yb1-xb4a.google.com with SMTP id 3f1490d57ef6-da1aa98ec19so7351514276.2 for ; Tue, 07 Nov 2023 12:20:25 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; 
c=relaxed/relaxed; d=google.com; s=20230601 Date: Tue, 7 Nov 2023 20:19:54 +0000 In-Reply-To: <20231107202002.667900-1-aghulati@google.com> References: <20231107202002.667900-1-aghulati@google.com> Message-ID: <20231107202002.667900-7-aghulati@google.com> Subject: [RFC PATCH 06/14] KVM: x86: Move user return msr operations out of KVM From: Anish Ghulati To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland Cc: Anish Ghulati Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Move kvm_user_return_msrs into VAC. Create helper functions to access user return MSRs from KVM and (temporarily) expose them via vac.h. When more code is moved to VAC these functions will no longer need to be public and can be made internal.
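As a hedged sketch (the helper names and signatures below are the ones this patch moves into vac.{h,c}; the surrounding functions are hypothetical), a vendor module keeps using the slots like so:

/*
 * Illustrative sketch only: register a slot once at setup, then defer
 * restoring the host value until the CPU returns to userspace.
 */
static int example_tsc_aux_slot;

static void example_hardware_setup(void)
{
	/* VAC tracks the host/current value pair for this MSR. */
	example_tsc_aux_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
}

static void example_vcpu_load(u64 guest_val)
{
	/*
	 * Write the guest value now; kvm_on_user_return() lazily
	 * restores the host value on the return-to-userspace path.
	 */
	kvm_set_user_return_msr(example_tsc_aux_slot, guest_val, -1ull);
}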
Signed-off-by: Anish Ghulati --- arch/x86/include/asm/kvm_host.h | 10 --- arch/x86/kvm/cpuid.c | 1 + arch/x86/kvm/svm/svm.c | 1 + arch/x86/kvm/vac.c | 131 +++++++++++++++++++++++++++++ arch/x86/kvm/vac.h | 34 ++++++++ arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 142 ++------------------------------ 7 files changed, 173 insertions(+), 147 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_hos= t.h index e01d1aa3628c..34b995306c31 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1785,7 +1785,6 @@ struct kvm_arch_async_pf { bool direct_map; }; =20 -extern u32 __read_mostly kvm_nr_uret_msrs; extern u64 __read_mostly host_efer; extern bool __read_mostly allow_smaller_maxphyaddr; extern bool __read_mostly enable_apicv; @@ -2139,15 +2138,6 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long i= pi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); =20 -int kvm_add_user_return_msr(u32 msr); -int kvm_find_user_return_msr(u32 msr); -int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask); - -static inline bool kvm_is_supported_user_return_msr(u32 msr) -{ - return kvm_find_user_return_msr(msr) >=3D 0; -} - u64 kvm_scale_tsc(u64 tsc, u64 ratio); u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc); u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multip= lier); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 01de1f659beb..961e5acd434c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -28,6 +28,7 @@ #include "trace.h" #include "pmu.h" #include "xen.h" +#include "vac.h" =20 /* * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need t= o be diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7fe9d11db8a6..f0a5cc43c023 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5,6 +5,7 @@ #include "irq.h" #include "mmu.h" #include "kvm_cache_regs.h" +#include "vac.h" #include "x86.h" #include "smm.h" #include "cpuid.h" diff --git a/arch/x86/kvm/vac.c b/arch/x86/kvm/vac.c index 18d2ae7d3e47..ab77aee4e1fa 100644 --- a/arch/x86/kvm/vac.c +++ b/arch/x86/kvm/vac.c @@ -1,3 +1,134 @@ // SPDX-License-Identifier: GPL-2.0-only =20 #include "vac.h" +#include + +u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS]; +struct kvm_user_return_msrs __percpu *user_return_msrs; + +u32 __read_mostly kvm_nr_uret_msrs; + +void kvm_on_user_return(struct user_return_notifier *urn) +{ + unsigned int slot; + struct kvm_user_return_msrs *msrs + =3D container_of(urn, struct kvm_user_return_msrs, urn); + struct kvm_user_return_msr_values *values; + unsigned long flags; + + /* + * Disabling irqs at this point since the following code could be + * interrupted and executed through kvm_arch_hardware_disable() + */ + local_irq_save(flags); + if (msrs->registered) { + msrs->registered =3D false; + user_return_notifier_unregister(urn); + } + local_irq_restore(flags); + for (slot =3D 0; slot < kvm_nr_uret_msrs; ++slot) { + values =3D &msrs->values[slot]; + if (values->host !=3D values->curr) { + wrmsrl(kvm_uret_msrs_list[slot], values->host); + values->curr =3D values->host; + } + } +} + +void kvm_user_return_msr_cpu_online(void) +{ + unsigned int cpu =3D smp_processor_id(); + struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); + u64 value; + int i; + + for (i =3D 0; i < kvm_nr_uret_msrs; ++i) { + rdmsrl_safe(kvm_uret_msrs_list[i], &value); + msrs->values[i].host =3D value; + msrs->values[i].curr 
=3D value; + } +} + +void drop_user_return_notifiers(void) +{ + unsigned int cpu =3D smp_processor_id(); + struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); + + if (msrs->registered) + kvm_on_user_return(&msrs->urn); +} + +static int kvm_probe_user_return_msr(u32 msr) +{ + u64 val; + int ret; + + preempt_disable(); + ret =3D rdmsrl_safe(msr, &val); + if (ret) + goto out; + ret =3D wrmsrl_safe(msr, val); +out: + preempt_enable(); + return ret; +} + +int kvm_add_user_return_msr(u32 msr) +{ + BUG_ON(kvm_nr_uret_msrs >=3D KVM_MAX_NR_USER_RETURN_MSRS); + + if (kvm_probe_user_return_msr(msr)) + return -1; + + kvm_uret_msrs_list[kvm_nr_uret_msrs] =3D msr; + return kvm_nr_uret_msrs++; +} + +int kvm_find_user_return_msr(u32 msr) +{ + int i; + + for (i =3D 0; i < kvm_nr_uret_msrs; ++i) { + if (kvm_uret_msrs_list[i] =3D=3D msr) + return i; + } + return -1; +} + +int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask) +{ + unsigned int cpu =3D smp_processor_id(); + struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); + int err; + + value =3D (value & mask) | (msrs->values[slot].host & ~mask); + if (value =3D=3D msrs->values[slot].curr) + return 0; + err =3D wrmsrl_safe(kvm_uret_msrs_list[slot], value); + if (err) + return 1; + + msrs->values[slot].curr =3D value; + if (!msrs->registered) { + msrs->urn.on_user_return =3D kvm_on_user_return; + user_return_notifier_register(&msrs->urn); + msrs->registered =3D true; + } + return 0; +} + +int kvm_alloc_user_return_msrs(void) +{ + user_return_msrs =3D alloc_percpu(struct kvm_user_return_msrs); + if (!user_return_msrs) { + pr_err("failed to allocate percpu kvm_user_return_msrs\n"); + return -ENOMEM; + } + kvm_nr_uret_msrs =3D 0; + return 0; +} + +void kvm_free_user_return_msrs(void) +{ + free_percpu(user_return_msrs); +} diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h index 4d5dc4700f4e..135d3be5461e 100644 --- a/arch/x86/kvm/vac.h +++ b/arch/x86/kvm/vac.h @@ -3,4 +3,38 @@ #ifndef ARCH_X86_KVM_VAC_H #define ARCH_X86_KVM_VAC_H =20 +#include + +/* + * Restoring the host value for MSRs that are only consumed when running in + * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU + * returns to userspace, i.e. the kernel can run with the guest's value. 
+ */ +#define KVM_MAX_NR_USER_RETURN_MSRS 16 + +struct kvm_user_return_msrs { + struct user_return_notifier urn; + bool registered; + struct kvm_user_return_msr_values { + u64 host; + u64 curr; + } values[KVM_MAX_NR_USER_RETURN_MSRS]; +}; + +extern u32 __read_mostly kvm_nr_uret_msrs; + +int kvm_alloc_user_return_msrs(void); +void kvm_free_user_return_msrs(void); +int kvm_add_user_return_msr(u32 msr); +int kvm_find_user_return_msr(u32 msr); +int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask); +void kvm_on_user_return(struct user_return_notifier *urn); +void kvm_user_return_msr_cpu_online(void); +void drop_user_return_notifiers(void); + +static inline bool kvm_is_supported_user_return_msr(u32 msr) +{ + return kvm_find_user_return_msr(msr) >=3D 0; +} + #endif // ARCH_X86_KVM_VAC_H diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 629e662b131e..7fea84a17edf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -64,6 +64,7 @@ #include "vmcs12.h" #include "vmx.h" #include "x86.h" +#include "vac.h" #include "smm.h" =20 #ifdef MODULE diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0a8b94678928..7466a5945147 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -34,6 +34,7 @@ #include "lapic.h" #include "xen.h" #include "smm.h" +#include "vac.h" =20 #include #include @@ -205,26 +206,6 @@ module_param(eager_page_split, bool, 0644); static bool __read_mostly mitigate_smt_rsb; module_param(mitigate_smt_rsb, bool, 0444); =20 -/* - * Restoring the host value for MSRs that are only consumed when running in - * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU - * returns to userspace, i.e. the kernel can run with the guest's value. - */ -#define KVM_MAX_NR_USER_RETURN_MSRS 16 - -struct kvm_user_return_msrs { - struct user_return_notifier urn; - bool registered; - struct kvm_user_return_msr_values { - u64 host; - u64 curr; - } values[KVM_MAX_NR_USER_RETURN_MSRS]; -}; - -u32 __read_mostly kvm_nr_uret_msrs; -static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS]; -static struct kvm_user_return_msrs __percpu *user_return_msrs; - #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ @@ -358,115 +339,6 @@ static inline void kvm_async_pf_hash_reset(struct kvm= _vcpu *vcpu) vcpu->arch.apf.gfns[i] =3D ~0; } =20 -static void kvm_on_user_return(struct user_return_notifier *urn) -{ - unsigned slot; - struct kvm_user_return_msrs *msrs - =3D container_of(urn, struct kvm_user_return_msrs, urn); - struct kvm_user_return_msr_values *values; - unsigned long flags; - - /* - * Disabling irqs at this point since the following code could be - * interrupted and executed through kvm_arch_hardware_disable() - */ - local_irq_save(flags); - if (msrs->registered) { - msrs->registered =3D false; - user_return_notifier_unregister(urn); - } - local_irq_restore(flags); - for (slot =3D 0; slot < kvm_nr_uret_msrs; ++slot) { - values =3D &msrs->values[slot]; - if (values->host !=3D values->curr) { - wrmsrl(kvm_uret_msrs_list[slot], values->host); - values->curr =3D values->host; - } - } -} - -static int kvm_probe_user_return_msr(u32 msr) -{ - u64 val; - int ret; - - preempt_disable(); - ret =3D rdmsrl_safe(msr, &val); - if (ret) - goto out; - ret =3D wrmsrl_safe(msr, val); -out: - preempt_enable(); - return ret; -} - -int kvm_add_user_return_msr(u32 msr) -{ - BUG_ON(kvm_nr_uret_msrs >=3D KVM_MAX_NR_USER_RETURN_MSRS); - - if 
(kvm_probe_user_return_msr(msr)) - return -1; - - kvm_uret_msrs_list[kvm_nr_uret_msrs] =3D msr; - return kvm_nr_uret_msrs++; -} - -int kvm_find_user_return_msr(u32 msr) -{ - int i; - - for (i =3D 0; i < kvm_nr_uret_msrs; ++i) { - if (kvm_uret_msrs_list[i] =3D=3D msr) - return i; - } - return -1; -} - -static void kvm_user_return_msr_cpu_online(void) -{ - unsigned int cpu =3D smp_processor_id(); - struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); - u64 value; - int i; - - for (i =3D 0; i < kvm_nr_uret_msrs; ++i) { - rdmsrl_safe(kvm_uret_msrs_list[i], &value); - msrs->values[i].host =3D value; - msrs->values[i].curr =3D value; - } -} - -int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) -{ - unsigned int cpu =3D smp_processor_id(); - struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); - int err; - - value =3D (value & mask) | (msrs->values[slot].host & ~mask); - if (value =3D=3D msrs->values[slot].curr) - return 0; - err =3D wrmsrl_safe(kvm_uret_msrs_list[slot], value); - if (err) - return 1; - - msrs->values[slot].curr =3D value; - if (!msrs->registered) { - msrs->urn.on_user_return =3D kvm_on_user_return; - user_return_notifier_register(&msrs->urn); - msrs->registered =3D true; - } - return 0; -} - -static void drop_user_return_notifiers(void) -{ - unsigned int cpu =3D smp_processor_id(); - struct kvm_user_return_msrs *msrs =3D per_cpu_ptr(user_return_msrs, cpu); - - if (msrs->registered) - kvm_on_user_return(&msrs->urn); -} - u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; @@ -9415,13 +9287,9 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init= _ops *ops) return -ENOMEM; } =20 - user_return_msrs =3D alloc_percpu(struct kvm_user_return_msrs); - if (!user_return_msrs) { - pr_err("failed to allocate percpu kvm_user_return_msrs\n"); - r =3D -ENOMEM; + r =3D kvm_alloc_user_return_msrs(); + if (r) goto out_free_x86_emulator_cache; - } - kvm_nr_uret_msrs =3D 0; =20 r =3D kvm_mmu_vendor_module_init(); if (r) @@ -9500,7 +9368,7 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_= ops *ops) out_mmu_exit: kvm_mmu_vendor_module_exit(); out_free_percpu: - free_percpu(user_return_msrs); + kvm_free_user_return_msrs(); out_free_x86_emulator_cache: kmem_cache_destroy(x86_emulator_cache); return r; @@ -9539,7 +9407,7 @@ void kvm_x86_vendor_exit(void) #endif static_call(kvm_x86_hardware_unsetup)(); kvm_mmu_vendor_module_exit(); - free_percpu(user_return_msrs); + kvm_free_user_return_msrs(); kmem_cache_destroy(x86_emulator_cache); #ifdef CONFIG_KVM_XEN static_key_deferred_flush(&kvm_xen_enabled); --=20 2.42.0.869.gea05f2083d-goog From nobody Wed Dec 31 04:49:14 2025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DFDA4C4332F for ; Tue, 7 Nov 2023 20:20:59 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1343552AbjKGUU7 (ORCPT ); Tue, 7 Nov 2023 15:20:59 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:45460 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235268AbjKGUUj (ORCPT ); Tue, 7 Nov 2023 15:20:39 -0500 Received: from mail-yw1-x1149.google.com (mail-yw1-x1149.google.com [IPv6:2607:f8b0:4864:20::1149]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EF6BB1706 for ; Tue, 7 Nov 2023 12:20:27 -0800 (PST) Received: by 
mail-yw1-x1149.google.com; Tue, 07 Nov 2023 12:20:27 -0800 (PST) Date: Tue, 7 Nov 2023 20:19:55 +0000 In-Reply-To: <20231107202002.667900-1-aghulati@google.com> References: <20231107202002.667900-1-aghulati@google.com> Message-ID: <20231107202002.667900-8-aghulati@google.com> Subject: [RFC PATCH 07/14] KVM: SVM: Move shared SVM data structures into VAC From: Anish Ghulati To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland Cc: Anish Ghulati, Venkatesh Srinivas Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Move svm_cpu_data into VAC. TODO: Explain why this data should be shared between KVMs and should be made global by moving it into VAC.
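For context, a minimal sketch of the access pattern this preserves (the wrapper function is hypothetical; the per-CPU variable and fields are the ones moved by this patch):

/*
 * Illustrative sketch only: svm_data stays per physical CPU, so any
 * KVM instance sharing that CPU must see the same ASID bookkeeping.
 */
static void example_new_asid(void)
{
	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);

	/* Roll the generation when the ASID space is exhausted. */
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
	}
}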
Signed-off-by: Venkatesh Srinivas Signed-off-by: Anish Ghulati --- arch/x86/kvm/svm/svm.c | 9 ++++++++- arch/x86/kvm/svm/svm.h | 16 +--------------- arch/x86/kvm/svm/vac.c | 5 +++++ arch/x86/kvm/svm/vac.h | 23 +++++++++++++++++++++++ 4 files changed, 37 insertions(+), 16 deletions(-) create mode 100644 arch/x86/kvm/svm/vac.h diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f0a5cc43c023..d53808d8ec37 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -234,7 +234,14 @@ static u8 rsm_ins_bytes[] =3D "\x0f\xaa"; =20 static unsigned long iopm_base; =20 -DEFINE_PER_CPU(struct svm_cpu_data, svm_data); +struct kvm_ldttss_desc { + u16 limit0; + u16 base0; + unsigned base1:8, type:5, dpl:2, p:1; + unsigned limit1:4, zero0:3, g:1, base2:8; + u32 base3; + u32 zero1; +} __attribute__((packed)); =20 /* * Only MSR_TSC_AUX is switched via the user return hook. EFER is switche= d via diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 436632706848..7fc652b1b92d 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -24,6 +24,7 @@ =20 #include "cpuid.h" #include "kvm_cache_regs.h" +#include "vac.h" =20 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) =20 @@ -291,21 +292,6 @@ struct vcpu_svm { bool guest_gif; }; =20 -struct svm_cpu_data { - u64 asid_generation; - u32 max_asid; - u32 next_asid; - u32 min_asid; - - struct page *save_area; - unsigned long save_area_pa; - - struct vmcb *current_vmcb; - - /* index =3D sev_asid, value =3D vmcb pointer */ - struct vmcb **sev_vmcbs; -}; - DECLARE_PER_CPU(struct svm_cpu_data, svm_data); =20 void recalc_intercepts(struct vcpu_svm *svm); diff --git a/arch/x86/kvm/svm/vac.c b/arch/x86/kvm/svm/vac.c index 4aabf16d2fc0..3e79279c6b34 100644 --- a/arch/x86/kvm/svm/vac.c +++ b/arch/x86/kvm/svm/vac.c @@ -1,2 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only =20 +#include + +#include "vac.h" + +DEFINE_PER_CPU(struct svm_cpu_data, svm_data); diff --git a/arch/x86/kvm/svm/vac.h b/arch/x86/kvm/svm/vac.h new file mode 100644 index 000000000000..2d42e4472703 --- /dev/null +++ b/arch/x86/kvm/svm/vac.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +#ifndef ARCH_X86_KVM_SVM_VAC_H +#define ARCH_X86_KVM_SVM_VAC_H + +#include "../vac.h" + +struct svm_cpu_data { + u64 asid_generation; + u32 max_asid; + u32 next_asid; + u32 min_asid; + + struct page *save_area; + unsigned long save_area_pa; + + struct vmcb *current_vmcb; + + /* index =3D sev_asid, value =3D vmcb pointer */ + struct vmcb **sev_vmcbs; +}; + +#endif // ARCH_X86_KVM_SVM_VAC_H --=20 2.42.0.869.gea05f2083d-goog From nobody Wed Dec 31 04:49:14 2025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4E1D9C4332F for ; Tue, 7 Nov 2023 20:21:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1343962AbjKGUVD (ORCPT ); Tue, 7 Nov 2023 15:21:03 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:45418 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235233AbjKGUUt (ORCPT ); Tue, 7 Nov 2023 15:20:49 -0500 Received: from mail-yw1-x1149.google.com (mail-yw1-x1149.google.com [IPv6:2607:f8b0:4864:20::1149]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 088011717 for ; Tue, 7 Nov 2023 12:20:29 -0800 (PST) Received: by mail-yw1-x1149.google.com with SMTP id 
00721157ae682-5aecf6e30e9so82679177b3.1; Tue, 07 Nov 2023 12:20:29 -0800 (PST) Date: Tue, 7 Nov 2023 20:19:56 +0000 In-Reply-To: <20231107202002.667900-1-aghulati@google.com> References: <20231107202002.667900-1-aghulati@google.com> Message-ID: <20231107202002.667900-9-aghulati@google.com> Subject: [RFC PATCH 08/14] KVM: VMX: Move shared VMX data structures into VAC From: Anish Ghulati To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland Cc: Anish Ghulati, Venkatesh Srinivas Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Move vmxarea and current_vmcs into VAC. Move the VPID bitmap into VAC. TODO: Explain why this data needs to be shared among multiple KVM modules and moved into VAC.
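A minimal sketch of the resulting usage (the callback names are hypothetical; allocate_vpid()/free_vpid() are the accessors this patch moves behind vac.h):

/*
 * Illustrative sketch only: the VPID bitmap is a system-wide resource,
 * so every KVM instance must allocate and release VPIDs through VAC.
 */
static int example_vcpu_create(struct vcpu_vmx *vmx)
{
	/* Returns 0, the reserved host VPID, when enable_vpid is off. */
	vmx->vpid = allocate_vpid();
	return 0;
}

static void example_vcpu_free(struct vcpu_vmx *vmx)
{
	free_vpid(vmx->vpid);
}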
Signed-off-by: Venkatesh Srinivas
Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/vmx/nested.c |  1 +
 arch/x86/kvm/vmx/vac.c    | 47 +++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vac.h    | 12 ++++++++++
 arch/x86/kvm/vmx/vmx.c    | 41 +++++----------------------------
 arch/x86/kvm/vmx/vmx.h    |  2 --
 5 files changed, 65 insertions(+), 38 deletions(-)
 create mode 100644 arch/x86/kvm/vmx/vac.h

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c5ec0ef51ff7..5c6ac7662453 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -14,6 +14,7 @@
 #include "pmu.h"
 #include "sgx.h"
 #include "trace.h"
+#include "vac.h"
 #include "vmx.h"
 #include "x86.h"
 #include "smm.h"
diff --git a/arch/x86/kvm/vmx/vac.c b/arch/x86/kvm/vmx/vac.c
index 4aabf16d2fc0..7b8ade0fb97f 100644
--- a/arch/x86/kvm/vmx/vac.c
+++ b/arch/x86/kvm/vmx/vac.c
@@ -1,2 +1,49 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
+#include
+#include
+
+#include "vac.h"
+
+
+static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+
+DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+
+void vac_set_vmxarea(struct vmcs *vmcs, int cpu)
+{
+	per_cpu(vmxarea, cpu) = vmcs;
+}
+
+struct vmcs *vac_get_vmxarea(int cpu)
+{
+	return per_cpu(vmxarea, cpu);
+}
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
+
+int allocate_vpid(void)
+{
+	int vpid;
+
+	if (!enable_vpid)
+		return 0;
+	spin_lock(&vmx_vpid_lock);
+	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+	if (vpid < VMX_NR_VPIDS)
+		__set_bit(vpid, vmx_vpid_bitmap);
+	else
+		vpid = 0;
+	spin_unlock(&vmx_vpid_lock);
+	return vpid;
+}
+
+void free_vpid(int vpid)
+{
+	if (!enable_vpid || vpid == 0)
+		return;
+	spin_lock(&vmx_vpid_lock);
+	__clear_bit(vpid, vmx_vpid_bitmap);
+	spin_unlock(&vmx_vpid_lock);
+}
diff --git a/arch/x86/kvm/vmx/vac.h b/arch/x86/kvm/vmx/vac.h
new file mode 100644
index 000000000000..46c54fe7447d
--- /dev/null
+++ b/arch/x86/kvm/vmx/vac.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include
+
+#include "../vac.h"
+#include "vmcs.h"
+
+void vac_set_vmxarea(struct vmcs *vmcs, int cpu);
+
+struct vmcs *vac_get_vmxarea(int cpu);
+int allocate_vpid(void);
+void free_vpid(int vpid);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7fea84a17edf..407e37810419 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -60,6 +60,7 @@
 #include "pmu.h"
 #include "sgx.h"
 #include "trace.h"
+#include "vac.h"
 #include "vmcs.h"
 #include "vmcs12.h"
 #include "vmx.h"
@@ -455,17 +456,12 @@ noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
 		    ext, eptp, gpa);
 }
 
-static DEFINE_PER_CPU(struct vmcs *, vmxarea);
-DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 /*
  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
  */
 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
 
-static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
-static DEFINE_SPINLOCK(vmx_vpid_lock);
-
 struct vmcs_config vmcs_config __ro_after_init;
 struct vmx_capability vmx_capability __ro_after_init;
 
@@ -2792,7 +2788,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
 static int vmx_hardware_enable(void)
 {
 	int cpu = raw_smp_processor_id();
-	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+	u64 phys_addr = __pa(vac_get_vmxarea(cpu));
 	int r;
 
 	if (cr4_read_shadow() & X86_CR4_VMXE)
@@ -2921,8 +2917,8 @@ static void free_kvm_area(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		free_vmcs(per_cpu(vmxarea, cpu));
-		per_cpu(vmxarea, cpu) = NULL;
+		free_vmcs(vac_get_vmxarea(cpu));
+		vac_set_vmxarea(NULL, cpu);
 	}
 }
 
@@ -2952,7 +2948,7 @@ static __init int alloc_kvm_area(void)
 		if (kvm_is_using_evmcs())
 			vmcs->hdr.revision_id = vmcs_config.revision_id;
 
-		per_cpu(vmxarea, cpu) = vmcs;
+		vac_set_vmxarea(vmcs, cpu);
 	}
 	return 0;
 }
@@ -3897,31 +3893,6 @@ static void seg_setup(int seg)
 	vmcs_write32(sf->ar_bytes, ar);
 }
 
-int allocate_vpid(void)
-{
-	int vpid;
-
-	if (!enable_vpid)
-		return 0;
-	spin_lock(&vmx_vpid_lock);
-	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
-	if (vpid < VMX_NR_VPIDS)
-		__set_bit(vpid, vmx_vpid_bitmap);
-	else
-		vpid = 0;
-	spin_unlock(&vmx_vpid_lock);
-	return vpid;
-}
-
-void free_vpid(int vpid)
-{
-	if (!enable_vpid || vpid == 0)
-		return;
-	spin_lock(&vmx_vpid_lock);
-	__clear_bit(vpid, vmx_vpid_bitmap);
-	spin_unlock(&vmx_vpid_lock);
-}
-
 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
 {
 	/*
@@ -8538,8 +8509,6 @@ static __init int hardware_setup(void)
 	kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
 	kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();
 
-	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
-
 	if (enable_ept)
 		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
 				      cpu_has_vmx_ept_execute_only());
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 476119670d82..03b11159fde5 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -376,8 +376,6 @@ struct kvm_vmx {
 
 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 			struct loaded_vmcs *buddy);
-int allocate_vpid(void);
-void free_vpid(int vpid);
 void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
-- 
2.42.0.869.gea05f2083d-goog

From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:19:57 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-10-aghulati@google.com>
Subject: [RFC PATCH 09/14] KVM: x86: Move shared KVM state into VAC
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
    Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar,
    Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com,
    Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org,
    Mark Rutland
Cc: Venkatesh Srinivas, Anish Ghulati

From: Venkatesh Srinivas

Move cpu_kick_mask and kvm_running_vcpu from arch-neutral KVM code into
VAC.

TODO: Explain why this needs to be moved into VAC.
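For illustration only, a hypothetical reader of the moved state (the
helper name below is made up; the in-tree accessor with this role is
kvm_get_running_vcpu()): any code that includes virt/kvm/vac.h can now
reference the per-CPU definitions that used to be private to
kvm_main.c.

	/* Hypothetical caller, for illustration; not part of this patch. */
	static struct kvm_vcpu *example_running_vcpu(void)
	{
		/* defined in virt/kvm/vac.c, declared in virt/kvm/vac.h */
		return __this_cpu_read(kvm_running_vcpu);
	}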
Signed-off-by: Venkatesh Srinivas
Signed-off-by: Anish Ghulati
---
 virt/kvm/kvm_main.c | 6 ++++--
 virt/kvm/vac.c      | 5 +++++
 virt/kvm/vac.h      | 3 +++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb50deaad3fd..575f044fd842 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -108,7 +108,6 @@ LIST_HEAD(vm_list);
 static struct kmem_cache *kvm_vcpu_cache;
 
 static __read_mostly struct preempt_ops kvm_preempt_ops;
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
 
 struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
@@ -150,7 +149,10 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
-static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+						   unsigned long start, unsigned long end)
+{
+}
 
 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
diff --git a/virt/kvm/vac.c b/virt/kvm/vac.c
index ff034a53af50..c628afeb3d4b 100644
--- a/virt/kvm/vac.c
+++ b/virt/kvm/vac.c
@@ -6,6 +6,11 @@
 #include
 #include
 
+DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+EXPORT_SYMBOL(cpu_kick_mask);
+
+DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
+
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
 DEFINE_MUTEX(vac_lock);
 
diff --git a/virt/kvm/vac.h b/virt/kvm/vac.h
index aed178a16bdb..f3e7b08168df 100644
--- a/virt/kvm/vac.h
+++ b/virt/kvm/vac.h
@@ -29,4 +29,7 @@ static inline void hardware_disable_all(void)
 }
 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
 
+DECLARE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+DECLARE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
+
 #endif
-- 
2.42.0.869.gea05f2083d-goog

From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:19:58 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-11-aghulati@google.com>
Subject: [RFC PATCH 10/14] KVM: VMX: Move VMX enable and disable into VAC
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
    Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar,
    Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com,
    Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org,
    Mark Rutland
Cc: Anish Ghulati

Move loaded_vmcss_on_cpu into VAC. loaded_vmcss_on_cpu is a list of the
VMCSs that have been loaded on a CPU; when that CPU is offlined, or on
kexec/crash, these need to be VMCLEAR'd.

Register and unregister callbacks for vmx_emergency_disable in
vac_vmx_init/exit, the init and exit functions for the VMX portion of
VAC. Temporarily call init and exit from vmx_init and __vmx_exit. This
will change once VAC is a module (the init/exit will be called via
module init and exit functions).

Move the call to hv_reset_evmcs from vmx_hardware_disable to
vmx_module_exit, because vmx_hardware_enable/disable is now part of
VAC.
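For illustration only, a hypothetical sketch of the module glue the
paragraph above anticipates; none of these module_init/module_exit
hooks exist in this series yet, and the names are made up.

	/* Hypothetical future VAC module glue, for illustration only. */
	static int __init vac_module_init(void)
	{
		/* registers vmx_emergency_disable with the crash/kexec path */
		return vac_vmx_init();
	}

	static void __exit vac_module_exit(void)
	{
		/* unregisters the emergency disable callback */
		vac_vmx_exit();
	}

	module_init(vac_module_init);
	module_exit(vac_module_exit);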
Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/vac.h     |  14 +++
 arch/x86/kvm/vmx/vac.c | 190 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vac.h |   7 +-
 arch/x86/kvm/vmx/vmx.c | 182 ++-------------------------------------
 4 files changed, 219 insertions(+), 174 deletions(-)

diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h
index 135d3be5461e..59cbf36ff8ce 100644
--- a/arch/x86/kvm/vac.h
+++ b/arch/x86/kvm/vac.h
@@ -5,6 +5,20 @@
 
 #include
 
+int __init vac_init(void);
+void vac_exit(void);
+
+#ifdef CONFIG_KVM_INTEL
+int __init vac_vmx_init(void);
+void vac_vmx_exit(void);
+#else
+int __init vac_vmx_init(void)
+{
+	return 0;
+}
+void vac_vmx_exit(void) {}
+#endif
+
 /*
  * Restoring the host value for MSRs that are only consumed when running in
  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
diff --git a/arch/x86/kvm/vmx/vac.c b/arch/x86/kvm/vmx/vac.c
index 7b8ade0fb97f..202686ccbaec 100644
--- a/arch/x86/kvm/vmx/vac.c
+++ b/arch/x86/kvm/vmx/vac.c
@@ -1,10 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
 #include
+#include
 #include
 
 #include "vac.h"
+#include "vmx_ops.h"
+#include "posted_intr.h"
 
+/*
+ * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
+ * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
+ */
+static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 
@@ -47,3 +55,185 @@ void free_vpid(int vpid)
 	__clear_bit(vpid, vmx_vpid_bitmap);
 	spin_unlock(&vmx_vpid_lock);
 }
+
+void add_vmcs_to_loaded_vmcss_on_cpu(
+		struct list_head *loaded_vmcss_on_cpu_link,
+		int cpu)
+{
+	list_add(loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu));
+}
+
+static void __loaded_vmcs_clear(void *arg)
+{
+	struct loaded_vmcs *loaded_vmcs = arg;
+	int cpu = raw_smp_processor_id();
+
+	if (loaded_vmcs->cpu != cpu)
+		return; /* vcpu migration can race with cpu offline */
+	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
+		per_cpu(current_vmcs, cpu) = NULL;
+
+	vmcs_clear(loaded_vmcs->vmcs);
+	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+		vmcs_clear(loaded_vmcs->shadow_vmcs);
+
+	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+
+	/*
+	 * Ensure all writes to loaded_vmcs, including deleting it from its
+	 * current percpu list, complete before setting loaded_vmcs->cpu to
+	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
+	 * and add loaded_vmcs to its percpu list before it's deleted from this
+	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
+	 */
+	smp_wmb();
+
+	loaded_vmcs->cpu = -1;
+	loaded_vmcs->launched = 0;
+}
+
+void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
+{
+	int cpu = loaded_vmcs->cpu;
+
+	if (cpu != -1)
+		smp_call_function_single(cpu,
+					 __loaded_vmcs_clear, loaded_vmcs, 1);
+}
+
+static int kvm_cpu_vmxon(u64 vmxon_pointer)
+{
+	u64 msr;
+
+	cr4_set_bits(X86_CR4_VMXE);
+
+	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+			  _ASM_EXTABLE(1b, %l[fault])
+			  : : [vmxon_pointer] "m"(vmxon_pointer)
+			  : : fault);
+	return 0;
+
+fault:
+	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
+		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+	cr4_clear_bits(X86_CR4_VMXE);
+
+	return -EFAULT;
+}
+
+int vmx_hardware_enable(void)
+{
+	int cpu = raw_smp_processor_id();
+	u64 phys_addr = __pa(vac_get_vmxarea(cpu));
+	int r;
+
+	if (cr4_read_shadow() & X86_CR4_VMXE)
+		return -EBUSY;
+
+	/*
+	 * This can happen if we hot-added a CPU but failed to allocate
+	 * VP assist page for it.
+	 */
+	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
+		return -EFAULT;
+
+	intel_pt_handle_vmx(1);
+
+	r = kvm_cpu_vmxon(phys_addr);
+	if (r) {
+		intel_pt_handle_vmx(0);
+		return r;
+	}
+
+	if (enable_ept)
+		ept_sync_global();
+
+	return 0;
+}
+
+static void vmclear_local_loaded_vmcss(void)
+{
+	int cpu = raw_smp_processor_id();
+	struct loaded_vmcs *v, *n;
+
+	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
+				 loaded_vmcss_on_cpu_link)
+		__loaded_vmcs_clear(v);
+}
+
+/*
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+static int kvm_cpu_vmxoff(void)
+{
+	asm_volatile_goto("1: vmxoff\n\t"
+			  _ASM_EXTABLE(1b, %l[fault])
+			  ::: "cc", "memory" : fault);
+
+	cr4_clear_bits(X86_CR4_VMXE);
+	return 0;
+
+fault:
+	cr4_clear_bits(X86_CR4_VMXE);
+	return -EIO;
+}
+
+static void vmx_emergency_disable(void)
+{
+	int cpu = raw_smp_processor_id();
+	struct loaded_vmcs *v;
+
+	kvm_rebooting = true;
+
+	/*
+	 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
+	 * set in task context. If this races with VMX is disabled by an NMI,
+	 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
+	 * kvm_rebooting set.
+	 */
+	if (!(__read_cr4() & X86_CR4_VMXE))
+		return;
+
+	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
+			    loaded_vmcss_on_cpu_link)
+		vmcs_clear(v->vmcs);
+
+	kvm_cpu_vmxoff();
+}
+
+void vmx_hardware_disable(void)
+{
+	vmclear_local_loaded_vmcss();
+
+	if (kvm_cpu_vmxoff())
+		kvm_spurious_fault();
+
+	intel_pt_handle_vmx(0);
+}
+
+int __init vac_vmx_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+
+		pi_init_cpu(cpu);
+	}
+
+	cpu_emergency_register_virt_callback(vmx_emergency_disable);
+
+	return 0;
+}
+
+void vac_vmx_exit(void)
+{
+	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+}
diff --git a/arch/x86/kvm/vmx/vac.h b/arch/x86/kvm/vmx/vac.h
index 46c54fe7447d..daeea8ef0d33 100644
--- a/arch/x86/kvm/vmx/vac.h
+++ b/arch/x86/kvm/vmx/vac.h
@@ -9,4 +9,9 @@ void vac_set_vmxarea(struct vmcs *vmcs, int cpu);
 
 struct vmcs *vac_get_vmxarea(int cpu);
 int allocate_vpid(void);
-void free_vpid(int vpid);
+void add_vmcs_to_loaded_vmcss_on_cpu(
+		struct list_head *loaded_vmcss_on_cpu_link,
+		int cpu);
+void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
+int vmx_hardware_enable(void);
+void vmx_hardware_disable(void);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 407e37810419..46e2d5c69d1d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -456,12 +456,6 @@ noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
 		    ext, eptp, gpa);
 }
 
-/*
- * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
- * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
- */
-static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-
 struct vmcs_config vmcs_config __ro_after_init;
 struct vmx_capability vmx_capability __ro_after_init;
 
@@ -716,90 +710,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 	return ret;
 }
 
-/*
- * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
- *
- * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
- * atomically track post-VMXON state, e.g. this may be called in NMI context.
- * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
- * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
- * magically in RM, VM86, compat mode, or at CPL>0.
- */
-static int kvm_cpu_vmxoff(void)
-{
-	asm_volatile_goto("1: vmxoff\n\t"
-			  _ASM_EXTABLE(1b, %l[fault])
-			  ::: "cc", "memory" : fault);
-
-	cr4_clear_bits(X86_CR4_VMXE);
-	return 0;
-
-fault:
-	cr4_clear_bits(X86_CR4_VMXE);
-	return -EIO;
-}
-
-static void vmx_emergency_disable(void)
-{
-	int cpu = raw_smp_processor_id();
-	struct loaded_vmcs *v;
-
-	kvm_rebooting = true;
-
-	/*
-	 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
-	 * set in task context. If this races with VMX is disabled by an NMI,
-	 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
-	 * kvm_rebooting set.
-	 */
-	if (!(__read_cr4() & X86_CR4_VMXE))
-		return;
-
-	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
-			    loaded_vmcss_on_cpu_link)
-		vmcs_clear(v->vmcs);
-
-	kvm_cpu_vmxoff();
-}
-
-static void __loaded_vmcs_clear(void *arg)
-{
-	struct loaded_vmcs *loaded_vmcs = arg;
-	int cpu = raw_smp_processor_id();
-
-	if (loaded_vmcs->cpu != cpu)
-		return; /* vcpu migration can race with cpu offline */
-	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
-		per_cpu(current_vmcs, cpu) = NULL;
-
-	vmcs_clear(loaded_vmcs->vmcs);
-	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
-		vmcs_clear(loaded_vmcs->shadow_vmcs);
-
-	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
-
-	/*
-	 * Ensure all writes to loaded_vmcs, including deleting it from its
-	 * current percpu list, complete before setting loaded_vmcs->cpu to
-	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
-	 * and add loaded_vmcs to its percpu list before it's deleted from this
-	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
-	 */
-	smp_wmb();
-
-	loaded_vmcs->cpu = -1;
-	loaded_vmcs->launched = 0;
-}
-
-void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
-{
-	int cpu = loaded_vmcs->cpu;
-
-	if (cpu != -1)
-		smp_call_function_single(cpu,
-					 __loaded_vmcs_clear, loaded_vmcs, 1);
-}
-
 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
 {
@@ -1411,8 +1321,9 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 	 */
 	smp_rmb();
 
-	list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
-		 &per_cpu(loaded_vmcss_on_cpu, cpu));
+	add_vmcs_to_loaded_vmcss_on_cpu(
+			&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
+			cpu);
 	local_irq_enable();
 }
 
@@ -2765,78 +2676,6 @@ static int vmx_check_processor_compat(void)
 	return 0;
 }
 
-static int kvm_cpu_vmxon(u64 vmxon_pointer)
-{
-	u64 msr;
-
-	cr4_set_bits(X86_CR4_VMXE);
-
-	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
-			  _ASM_EXTABLE(1b, %l[fault])
-			  : : [vmxon_pointer] "m"(vmxon_pointer)
-			  : : fault);
-	return 0;
-
-fault:
-	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
-		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
-	cr4_clear_bits(X86_CR4_VMXE);
-
-	return -EFAULT;
-}
-
-static int vmx_hardware_enable(void)
-{
-	int cpu = raw_smp_processor_id();
-	u64 phys_addr = __pa(vac_get_vmxarea(cpu));
-	int r;
-
-	if (cr4_read_shadow() & X86_CR4_VMXE)
-		return -EBUSY;
-
-	/*
-	 * This can happen if we hot-added a CPU but failed to allocate
-	 * VP assist page for it.
-	 */
-	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
-		return -EFAULT;
-
-	intel_pt_handle_vmx(1);
-
-	r = kvm_cpu_vmxon(phys_addr);
-	if (r) {
-		intel_pt_handle_vmx(0);
-		return r;
-	}
-
-	if (enable_ept)
-		ept_sync_global();
-
-	return 0;
-}
-
-static void vmclear_local_loaded_vmcss(void)
-{
-	int cpu = raw_smp_processor_id();
-	struct loaded_vmcs *v, *n;
-
-	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
-				 loaded_vmcss_on_cpu_link)
-		__loaded_vmcs_clear(v);
-}
-
-static void vmx_hardware_disable(void)
-{
-	vmclear_local_loaded_vmcss();
-
-	if (kvm_cpu_vmxoff())
-		kvm_spurious_fault();
-
-	hv_reset_evmcs();
-
-	intel_pt_handle_vmx(0);
-}
-
 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
 {
 	int node = cpu_to_node(cpu);
@@ -8182,13 +8021,15 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+	//TODO: Remove this exit call once VAC is a module
+	vac_vmx_exit();
 
 	vmx_cleanup_l1d_flush();
 }
 
 void vmx_module_exit(void)
 {
+	hv_reset_evmcs();
 	__vmx_exit();
 }
 
@@ -8603,7 +8444,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 
 int __init vmx_init(void)
 {
-	int r, cpu;
+	int r;
 
 	/*
	 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
@@ -8626,13 +8467,8 @@ int __init vmx_init(void)
 	if (r)
 		goto err_l1d_flush;
 
-	for_each_possible_cpu(cpu) {
-		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-
-		pi_init_cpu(cpu);
-	}
-
-	cpu_emergency_register_virt_callback(vmx_emergency_disable);
+	//TODO: Remove this init call once VAC is a module
+	vac_vmx_init();
 
 	vmx_check_vmcs12_offsets();
 
-- 
2.42.0.869.gea05f2083d-goog

From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:19:59 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-12-aghulati@google.com>
Subject: [RFC PATCH 11/14] KVM: SVM: Move SVM enable and disable into VAC
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
    Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar,
    Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com,
    Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org,
    Mark Rutland
Cc: Anish Ghulati

Move SVM's hardware enable and disable into VAC. Similar to VMX, this
requires a temporary call to new init and exit functions within VAC,
and moving svm_init_erratum_383 into svm_init instead of hardware
enable.

Delete __svm_exit and make svm_module_exit a noop.
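For illustration only, a hypothetical per-CPU caller of the moved
helpers (the function below is made up; later in this series
kvm_arch_hardware_enable() becomes the real caller):

	/* Hypothetical caller, for illustration; not part of this patch. */
	static int example_svm_cycle_this_cpu(void)
	{
		int r;

		r = svm_hardware_enable();	/* sets EFER.SVME and MSR_VM_HSAVE_PA */
		if (r)
			return r;		/* e.g. -EBUSY if SVME was already set */

		/* ... guests run here ... */

		svm_hardware_disable();		/* clears EFER.SVME, restores PMU */
		return 0;
	}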
Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/svm/sev.c      |   2 +-
 arch/x86/kvm/svm/svm.c      | 129 ++----------------------------------
 arch/x86/kvm/svm/svm.h      |   4 +-
 arch/x86/kvm/svm/svm_data.h |  23 +++++++
 arch/x86/kvm/svm/vac.c      | 116 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/vac.h      |  23 +++----
 arch/x86/kvm/vac.h          |  12 ++++
 arch/x86/kvm/vmx/vac.h      |   5 ++
 8 files changed, 175 insertions(+), 139 deletions(-)
 create mode 100644 arch/x86/kvm/svm/svm_data.h

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b9a0a939d59f..d7b76710ab0a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -28,6 +28,7 @@
 #include "mmu.h"
 #include "x86.h"
 #include "svm.h"
+#include "svm_data.h"
 #include "svm_ops.h"
 #include "cpuid.h"
 #include "trace.h"
@@ -68,7 +69,6 @@ module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
 static u8 sev_enc_bit;
 static DECLARE_RWSEM(sev_deactivate_lock);
 static DEFINE_MUTEX(sev_bitmap_lock);
-unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
 static unsigned int nr_asids;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d53808d8ec37..752f769c0333 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5,11 +5,11 @@
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
-#include "vac.h"
 #include "x86.h"
 #include "smm.h"
 #include "cpuid.h"
 #include "pmu.h"
+#include "vac.h"
 
 #include
 #include
@@ -68,12 +68,6 @@ static bool erratum_383_found __read_mostly;
 
 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 
-/*
- * Set osvw_len to higher value when updated Revision Guides
- * are published and we know what the new status bits are
- */
-static uint64_t osvw_len = 4, osvw_status;
-
 static DEFINE_PER_CPU(u64, current_tsc_ratio);
 
 #define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
@@ -211,9 +205,6 @@ module_param(vgif, int, 0444);
 static int lbrv = true;
 module_param(lbrv, int, 0444);
 
-static int tsc_scaling = true;
-module_param(tsc_scaling, int, 0444);
-
 /*
  * enable / disable AVIC.  Because the defaults differ for APICv
  * support between VMX and SVM we cannot use module_param_named.
@@ -584,106 +575,6 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
 	__this_cpu_write(current_tsc_ratio, multiplier);
 }
 
-static inline void kvm_cpu_svm_disable(void)
-{
-	uint64_t efer;
-
-	wrmsrl(MSR_VM_HSAVE_PA, 0);
-	rdmsrl(MSR_EFER, efer);
-	if (efer & EFER_SVME) {
-		/*
-		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
-		 * NMI aren't blocked.
-		 */
-		stgi();
-		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
-	}
-}
-
-static void svm_emergency_disable(void)
-{
-	kvm_rebooting = true;
-
-	kvm_cpu_svm_disable();
-}
-
-static void svm_hardware_disable(void)
-{
-	/* Make sure we clean up behind us */
-	if (tsc_scaling)
-		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
-
-	kvm_cpu_svm_disable();
-
-	amd_pmu_disable_virt();
-}
-
-static int svm_hardware_enable(void)
-{
-
-	struct svm_cpu_data *sd;
-	uint64_t efer;
-	int me = raw_smp_processor_id();
-
-	rdmsrl(MSR_EFER, efer);
-	if (efer & EFER_SVME)
-		return -EBUSY;
-
-	sd = per_cpu_ptr(&svm_data, me);
-	sd->asid_generation = 1;
-	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
-	sd->next_asid = sd->max_asid + 1;
-	sd->min_asid = max_sev_asid + 1;
-
-	wrmsrl(MSR_EFER, efer | EFER_SVME);
-
-	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
-
-	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-		/*
-		 * Set the default value, even if we don't use TSC scaling
-		 * to avoid having stale value in the msr
-		 */
-		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
-	}
-
-
-	/*
-	 * Get OSVW bits.
-	 *
-	 * Note that it is possible to have a system with mixed processor
-	 * revisions and therefore different OSVW bits. If bits are not the same
-	 * on different processors then choose the worst case (i.e. if erratum
-	 * is present on one processor and not on another then assume that the
-	 * erratum is present everywhere).
-	 */
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
-		uint64_t len, status = 0;
-		int err;
-
-		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
-		if (!err)
-			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
-						      &err);
-
-		if (err)
-			osvw_status = osvw_len = 0;
-		else {
-			if (len < osvw_len)
-				osvw_len = len;
-			osvw_status |= status;
-			osvw_status &= (1ULL << osvw_len) - 1;
-		}
-	} else
-		osvw_status = osvw_len = 0;
-
-	svm_init_erratum_383();
-
-	amd_pmu_enable_virt();
-
-	return 0;
-}
-
 static void svm_cpu_uninit(int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
@@ -4878,14 +4769,9 @@ static int svm_vm_init(struct kvm *kvm)
 	return 0;
 }
 
-static void __svm_exit(void)
-{
-	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
-}
-
 void svm_module_exit(void)
 {
-	__svm_exit();
+	return;
 }
 
 static struct kvm_x86_ops svm_x86_ops __initdata = {
@@ -5325,7 +5211,8 @@ int __init svm_init(void)
 	if (r)
 		return r;
 
-	cpu_emergency_register_virt_callback(svm_emergency_disable);
+	//TODO: Remove this init call once VAC is a module
+	vac_svm_init();
 
 	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
@@ -5334,11 +5221,9 @@ int __init svm_init(void)
 	r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
 		     THIS_MODULE);
 	if (r)
-		goto err_kvm_init;
+		return r;
 
-	return 0;
+	svm_init_erratum_383();
 
-err_kvm_init:
-	__svm_exit();
-	return r;
+	return 0;
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7fc652b1b92d..7bd0dc0e000f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -24,7 +24,7 @@
 
 #include "cpuid.h"
 #include "kvm_cache_regs.h"
-#include "vac.h"
+#include "svm_data.h"
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
@@ -651,8 +651,6 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
 #define GHCB_VERSION_MIN 1ULL
 
 
-extern unsigned int max_sev_asid;
-
 void sev_vm_destroy(struct kvm *kvm);
 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
 int sev_mem_enc_register_region(struct kvm *kvm,
diff --git a/arch/x86/kvm/svm/svm_data.h b/arch/x86/kvm/svm/svm_data.h
new file mode 100644
index 000000000000..9605807fc9d4
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_data.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#ifndef ARCH_X86_KVM_SVM_DATA_H
+#define ARCH_X86_KVM_SVM_DATA_H
+
+struct svm_cpu_data {
+	u64 asid_generation;
+	u32 max_asid;
+	u32 next_asid;
+	u32 min_asid;
+
+	struct page *save_area;
+	unsigned long save_area_pa;
+
+	struct vmcb *current_vmcb;
+
+	/* index = sev_asid, value = vmcb pointer */
+	struct vmcb **sev_vmcbs;
+};
+
+extern unsigned int max_sev_asid;
+
+#endif // ARCH_X86_KVM_SVM_DATA_H
diff --git a/arch/x86/kvm/svm/vac.c b/arch/x86/kvm/svm/vac.c
index 3e79279c6b34..2dd1c763f7d6 100644
--- a/arch/x86/kvm/svm/vac.c
+++ b/arch/x86/kvm/svm/vac.c
@@ -1,7 +1,123 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
+#include
+#include
 #include
 
+#include "svm_ops.h"
 #include "vac.h"
 
 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
+unsigned int max_sev_asid;
+
+static inline void kvm_cpu_svm_disable(void)
+{
+	uint64_t efer;
+
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	if (efer & EFER_SVME) {
+		/*
+		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+		 * NMI aren't blocked.
+		 */
+		stgi();
+		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+	}
+}
+
+static void svm_emergency_disable(void)
+{
+	kvm_rebooting = true;
+
+	kvm_cpu_svm_disable();
+}
+
+void svm_hardware_disable(void)
+{
+	/* Make sure we clean up behind us */
+	if (tsc_scaling) {
+		// TODO: Fix everything TSC
+		// __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
+	}
+
+	kvm_cpu_svm_disable();
+
+	amd_pmu_disable_virt();
+}
+
+int svm_hardware_enable(void)
+{
+
+	struct svm_cpu_data *sd;
+	uint64_t efer;
+	int me = raw_smp_processor_id();
+
+	rdmsrl(MSR_EFER, efer);
+	if (efer & EFER_SVME)
+		return -EBUSY;
+
+	sd = per_cpu_ptr(&svm_data, me);
+	sd->asid_generation = 1;
+	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+	sd->next_asid = sd->max_asid + 1;
+	sd->min_asid = max_sev_asid + 1;
+
+	wrmsrl(MSR_EFER, efer | EFER_SVME);
+
+	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
+
+	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+		/*
+		 * Set the default value, even if we don't use TSC scaling
+		 * to avoid having stale value in the msr
+		 */
+		// TODO: Fix everything TSC
+		// __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
+	}
+
+
+	/*
+	 * Get OSVW bits.
+	 *
+	 * Note that it is possible to have a system with mixed processor
+	 * revisions and therefore different OSVW bits. If bits are not the same
+	 * on different processors then choose the worst case (i.e. if erratum
+	 * is present on one processor and not on another then assume that the
+	 * erratum is present everywhere).
+	 */
+	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
+		uint64_t len, status = 0;
+		int err;
+
+		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+		if (!err)
+			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
+						      &err);
+
+		if (err)
+			osvw_status = osvw_len = 0;
+		else {
+			if (len < osvw_len)
+				osvw_len = len;
+			osvw_status |= status;
+			osvw_status &= (1ULL << osvw_len) - 1;
+		}
+	} else
+		osvw_status = osvw_len = 0;
+
+	amd_pmu_enable_virt();
+
+	return 0;
+}
+
+int __init vac_svm_init(void)
+{
+	cpu_emergency_register_virt_callback(svm_emergency_disable);
+
+	return 0;
+}
+
+void vac_svm_exit(void)
+{
+	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+}
diff --git a/arch/x86/kvm/svm/vac.h b/arch/x86/kvm/svm/vac.h
index 2d42e4472703..870cb8a9c8d2 100644
--- a/arch/x86/kvm/svm/vac.h
+++ b/arch/x86/kvm/svm/vac.h
@@ -1,23 +1,20 @@
 // SPDX-License-Identifier: GPL-2.0-only
-//
+
 #ifndef ARCH_X86_KVM_SVM_VAC_H
 #define ARCH_X86_KVM_SVM_VAC_H
 
 #include "../vac.h"
+#include "svm_data.h"
 
-struct svm_cpu_data {
-	u64 asid_generation;
-	u32 max_asid;
-	u32 next_asid;
-	u32 min_asid;
-
-	struct page *save_area;
-	unsigned long save_area_pa;
+static int tsc_scaling = true;
 
-	struct vmcb *current_vmcb;
+/*
+ * Set osvw_len to higher value when updated Revision Guides
+ * are published and we know what the new status bits are
+ */
+static uint64_t osvw_len = 4, osvw_status;
 
-	/* index = sev_asid, value = vmcb pointer */
-	struct vmcb **sev_vmcbs;
-};
+int svm_hardware_enable(void);
+void svm_hardware_disable(void);
 
 #endif // ARCH_X86_KVM_SVM_VAC_H
diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h
index 59cbf36ff8ce..6c0a480ee9e3 100644
--- a/arch/x86/kvm/vac.h
+++ b/arch/x86/kvm/vac.h
@@ -19,6 +19,18 @@ int __init vac_vmx_init(void)
 void vac_vmx_exit(void) {}
 #endif
 
+#ifdef CONFIG_KVM_AMD
+int __init vac_svm_init(void);
+void vac_svm_exit(void);
+#else
+int __init vac_svm_init(void)
+{
+	return 0;
+}
+void vac_svm_exit(void) {}
+#endif
+
+
 /*
  * Restoring the host value for MSRs that are only consumed when running in
  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
diff --git a/arch/x86/kvm/vmx/vac.h b/arch/x86/kvm/vmx/vac.h
index daeea8ef0d33..d5af0ca67e3f 100644
--- a/arch/x86/kvm/vmx/vac.h
+++ b/arch/x86/kvm/vmx/vac.h
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
+#ifndef ARCH_X86_KVM_VMX_VAC_H
+#define ARCH_X86_KVM_VMX_VAC_H
+
 #include
 
 #include "../vac.h"
@@ -15,3 +18,5 @@ void add_vmcs_to_loaded_vmcss_on_cpu(
 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
 int vmx_hardware_enable(void);
 void vmx_hardware_disable(void);
+
+#endif // ARCH_X86_KVM_VMX_VAC_H
-- 
2.42.0.869.gea05f2083d-goog

From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:20:00 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-13-aghulati@google.com>
Subject: [RFC PATCH 12/14] KVM: x86: Move VMX and SVM support checks into VAC
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
    Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar,
    Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com,
    Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org,
    Mark Rutland
Cc: Anish Ghulati

The kvm_is_vmx_supported/kvm_is_svm_supported helpers check whether VMX
or SVM is usable on the host. Move them into VAC because they need to
be called before each hardware enable and disable call.
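For illustration only, a minimal sketch of why the wrapper pins the
task before checking (the real code is in the diff below): the inner
helper reads CPUID and per-CPU MSRs/feature bits, so the task must not
migrate to another CPU mid-check.

	/* Sketch of the pattern being moved; see the diff below. */
	bool example_is_vmx_supported(void)
	{
		bool supported;

		migrate_disable();	/* pin to this CPU for the CPUID/MSR reads */
		supported = __kvm_is_vmx_supported();
		migrate_enable();

		return supported;
	}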
Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/svm/svm.c | 45 +-----------------------------------------
 arch/x86/kvm/svm/vac.c | 43 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vac.h     |  4 ++++
 arch/x86/kvm/vmx/vac.c | 29 +++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c | 31 +----------------------------
 arch/x86/kvm/x86.h     |  6 ------
 6 files changed, 78 insertions(+), 80 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 752f769c0333..df5673c98e7b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -515,52 +515,9 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
 		vcpu->arch.osvw.status |= 1;
 }
 
-static bool __kvm_is_svm_supported(void)
-{
-	int cpu = smp_processor_id();
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	u64 vm_cr;
-
-	if (c->x86_vendor != X86_VENDOR_AMD &&
-	    c->x86_vendor != X86_VENDOR_HYGON) {
-		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
-		return false;
-	}
-
-	if (!cpu_has(c, X86_FEATURE_SVM)) {
-		pr_err("SVM not supported by CPU %d\n", cpu);
-		return false;
-	}
-
-	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
-		pr_info("KVM is unsupported when running as an SEV guest\n");
-		return false;
-	}
-
-	rdmsrl(MSR_VM_CR, vm_cr);
-	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
-		pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
-		return false;
-	}
-
-	return true;
-}
-
-bool kvm_is_svm_supported(void)
-{
-	bool supported;
-
-	migrate_disable();
-	supported = __kvm_is_svm_supported();
-	migrate_enable();
-
-	return supported;
-}
-
 static int svm_check_processor_compat(void)
 {
-	if (!__kvm_is_svm_supported())
+	if (!kvm_is_svm_supported())
 		return -EIO;
 
 	return 0;
diff --git a/arch/x86/kvm/svm/vac.c b/arch/x86/kvm/svm/vac.c
index 2dd1c763f7d6..7c4db99ca7d5 100644
--- a/arch/x86/kvm/svm/vac.c
+++ b/arch/x86/kvm/svm/vac.c
@@ -10,6 +10,49 @@
 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
 unsigned int max_sev_asid;
 
+static bool __kvm_is_svm_supported(void)
+{
+	int cpu = smp_processor_id();
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	u64 vm_cr;
+
+	if (c->x86_vendor != X86_VENDOR_AMD &&
+	    c->x86_vendor != X86_VENDOR_HYGON) {
+		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
+		return false;
+	}
+
+	if (!cpu_has(c, X86_FEATURE_SVM)) {
+		pr_err("SVM not supported by CPU %d\n", cpu);
+		return false;
+	}
+
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+		pr_info("KVM is unsupported when running as an SEV guest\n");
+		return false;
+	}
+
+	rdmsrl(MSR_VM_CR, vm_cr);
+	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
+		pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
+		return false;
+	}
+
+	return true;
+}
+
+bool kvm_is_svm_supported(void)
+{
+	bool supported;
+
+	migrate_disable();
+	supported = __kvm_is_svm_supported();
+	migrate_enable();
+
+	return supported;
+}
+
 static inline void kvm_cpu_svm_disable(void)
 {
 	uint64_t efer;
diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h
index 6c0a480ee9e3..5be30cce5a1c 100644
--- a/arch/x86/kvm/vac.h
+++ b/arch/x86/kvm/vac.h
@@ -9,9 +9,11 @@ int __init vac_init(void);
 void vac_exit(void);
 
 #ifdef CONFIG_KVM_INTEL
+bool kvm_is_vmx_supported(void);
 int __init vac_vmx_init(void);
 void vac_vmx_exit(void);
 #else
+bool kvm_is_vmx_supported(void) { return false; }
 int __init vac_vmx_init(void)
 {
 	return 0;
@@ -20,9 +22,11 @@ void vac_vmx_exit(void) {}
 #endif
 
 #ifdef CONFIG_KVM_AMD
+bool kvm_is_svm_supported(void);
 int __init vac_svm_init(void);
 void vac_svm_exit(void);
 #else
+bool kvm_is_svm_supported(void) { return false; }
 int __init vac_svm_init(void)
 {
 	return 0;
diff --git a/arch/x86/kvm/vmx/vac.c b/arch/x86/kvm/vmx/vac.c
index 202686ccbaec..cdfdeb67a719 100644
--- a/arch/x86/kvm/vmx/vac.c
+++ b/arch/x86/kvm/vmx/vac.c
@@ -31,6 +31,35 @@ struct vmcs *vac_get_vmxarea(int cpu)
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
+static bool __kvm_is_vmx_supported(void)
+{
+	int cpu = smp_processor_id();
+
+	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
+		pr_err("VMX not supported by CPU %d\n", cpu);
+		return false;
+	}
+
+	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+	    !this_cpu_has(X86_FEATURE_VMX)) {
+		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
+		return false;
+	}
+
+	return true;
+}
+
+bool kvm_is_vmx_supported(void)
+{
+	bool supported;
+
+	migrate_disable();
+	supported = __kvm_is_vmx_supported();
+	migrate_enable();
+
+	return supported;
+}
+
 int allocate_vpid(void)
 {
 	int vpid;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46e2d5c69d1d..6301b49e0e80 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2625,42 +2625,13 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	return 0;
 }
 
-static bool __kvm_is_vmx_supported(void)
-{
-	int cpu = smp_processor_id();
-
-	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
-		pr_err("VMX not supported by CPU %d\n", cpu);
-		return false;
-	}
-
-	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
-	    !this_cpu_has(X86_FEATURE_VMX)) {
-		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
-		return false;
-	}
-
-	return true;
-}
-
-bool kvm_is_vmx_supported(void)
-{
-	bool supported;
-
-	migrate_disable();
-	supported = __kvm_is_vmx_supported();
-	migrate_enable();
-
-	return supported;
-}
-
 static int vmx_check_processor_compat(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct vmcs_config vmcs_conf;
 	struct vmx_capability vmx_cap;
 
-	if (!__kvm_is_vmx_supported())
+	if (!kvm_is_vmx_supported())
 		return -EIO;
 
 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 322be05e6c5b..1da8efcd3e9c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -10,18 +10,12 @@
 #include "kvm_emulate.h"
 
 #ifdef CONFIG_KVM_AMD
-bool kvm_is_svm_supported(void);
 int __init svm_init(void);
 void svm_module_exit(void);
-#else
-bool kvm_is_svm_supported(void) { return false; }
 #endif
 #ifdef CONFIG_KVM_INTEL
-bool kvm_is_vmx_supported(void);
 int __init vmx_init(void);
 void vmx_module_exit(void);
-#else
-bool kvm_is_vmx_supported(void) { return false; }
 #endif
 
 struct kvm_caps {
-- 
2.42.0.869.gea05f2083d-goog

From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:20:01 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-14-aghulati@google.com>
Subject: [RFC PATCH 13/14] KVM: x86: VAC: Move all hardware enable/disable code into VAC
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
    Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar,
    Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com,
    Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org,
    Mark Rutland
Cc: Anish Ghulati

De-indirect hardware enable and disable. Now that all of these
functions live in VAC, they do not need to be reached via an indirect
ops table and static call.
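For illustration only, the shape of the change side by side (both forms
appear in the diff below; this is just the before/after extracted):

	/* Before: indirect dispatch through the x86 ops table. */
	ret = static_call(kvm_x86_hardware_enable)();

	/* After: direct dispatch inside VAC. */
	if (kvm_is_vmx_supported())
		ret = vmx_hardware_enable();
	else if (kvm_is_svm_supported())
		ret = svm_hardware_enable();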
+ *
+ * Hardware virtualization extension instructions may fault if a reboot turns
+ * off virtualization while processes are running.  Usually after catching the
+ * fault we just panic; during reboot instead the instruction is ignored.
+ */
+noinstr void kvm_spurious_fault(void)
+{
+	/* Fault while not rebooting.  We want the trace. */
+	BUG_ON(!kvm_rebooting);
+}
+
 int kvm_alloc_user_return_msrs(void)
 {
 	user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h
index 5be30cce5a1c..daf1f137d196 100644
--- a/arch/x86/kvm/vac.h
+++ b/arch/x86/kvm/vac.h
@@ -5,13 +5,14 @@
 
 #include
 
-int __init vac_init(void);
-void vac_exit(void);
+void kvm_spurious_fault(void);
 
 #ifdef CONFIG_KVM_INTEL
 bool kvm_is_vmx_supported(void);
 int __init vac_vmx_init(void);
 void vac_vmx_exit(void);
+int vmx_hardware_enable(void);
+void vmx_hardware_disable(void);
 #else
 bool kvm_is_vmx_supported(void) { return false }
 int __init vac_vmx_init(void)
@@ -25,6 +26,8 @@ void vac_vmx_exit(void) {}
 bool kvm_is_svm_supported(void);
 int __init vac_svm_init(void);
 void vac_svm_exit(void);
+int svm_hardware_enable(void);
+void svm_hardware_disable(void);
 #else
 bool kvm_is_svm_supported(void) { return false }
 int __init vac_svm_init(void)
@@ -59,8 +62,6 @@ int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask);
 void kvm_on_user_return(struct user_return_notifier *urn);
-void kvm_user_return_msr_cpu_online(void);
-void drop_user_return_notifiers(void);
 
 static inline bool kvm_is_supported_user_return_msr(u32 msr)
 {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6301b49e0e80..69a6a8591996 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8013,8 +8013,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
 	.hardware_unsetup = vmx_hardware_unsetup,
 
-	.hardware_enable = vmx_hardware_enable,
-	.hardware_disable = vmx_hardware_disable,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vm_size = sizeof(struct kvm_vmx),
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 33af7b4c6eb4..60325fd39120 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -8,6 +8,7 @@
 
 #include "hyperv.h"
 #include "vmcs.h"
+#include "../vac.h"
 #include "../x86.h"
 
 void vmread_error(unsigned long field);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7466a5945147..a74139061e4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -370,19 +370,6 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 0;
 }
 
-/*
- * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
- *
- * Hardware virtualization extension instructions may fault if a reboot turns
- * off virtualization while processes are running.  Usually after catching the
- * fault we just panic; during reboot instead the instruction is ignored.
- */
-noinstr void kvm_spurious_fault(void)
-{
-	/* Fault while not rebooting.  We want the trace. */
-	BUG_ON(!kvm_rebooting);
-}
-
 #define EXCPT_BENIGN		0
 #define EXCPT_CONTRIBUTORY	1
 #define EXCPT_PF		2
@@ -9363,7 +9350,6 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	return 0;
 
 out_unwind_ops:
-	kvm_x86_ops.hardware_enable = NULL;
 	static_call(kvm_x86_hardware_unsetup)();
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
@@ -9414,7 +9400,6 @@ void kvm_x86_vendor_exit(void)
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
 	mutex_lock(&vendor_module_lock);
-	kvm_x86_ops.hardware_enable = NULL;
 	mutex_unlock(&vendor_module_lock);
 }
 
@@ -11952,108 +11937,6 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	kvm_rip_write(vcpu, 0);
 }
 
-int kvm_arch_hardware_enable(void)
-{
-	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
-	unsigned long i;
-	int ret;
-	u64 local_tsc;
-	u64 max_tsc = 0;
-	bool stable, backwards_tsc = false;
-
-	kvm_user_return_msr_cpu_online();
-
-	ret = kvm_x86_check_processor_compatibility();
-	if (ret)
-		return ret;
-
-	ret = static_call(kvm_x86_hardware_enable)();
-	if (ret != 0)
-		return ret;
-
-	local_tsc = rdtsc();
-	stable = !kvm_check_tsc_unstable();
-	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (!stable && vcpu->cpu == smp_processor_id())
-				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
-				backwards_tsc = true;
-				if (vcpu->arch.last_host_tsc > max_tsc)
-					max_tsc = vcpu->arch.last_host_tsc;
-			}
-		}
-	}
-
-	/*
-	 * Sometimes, even reliable TSCs go backwards.  This happens on
-	 * platforms that reset TSC during suspend or hibernate actions, but
-	 * maintain synchronization.  We must compensate.  Fortunately, we can
-	 * detect that condition here, which happens early in CPU bringup,
-	 * before any KVM threads can be running.  Unfortunately, we can't
-	 * bring the TSCs fully up to date with real time, as we aren't yet far
-	 * enough into CPU bringup that we know how much real time has actually
-	 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot
-	 * variables that haven't been updated yet.
-	 *
-	 * So we simply find the maximum observed TSC above, then record the
-	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
-	 * the adjustment will be applied.  Note that we accumulate
-	 * adjustments, in case multiple suspend cycles happen before some VCPU
-	 * gets a chance to run again.  In the event that no KVM threads get a
-	 * chance to run, we will miss the entire elapsed period, as we'll have
-	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
-	 * loose cycle time.  This isn't too big a deal, since the loss will be
-	 * uniform across all VCPUs (not to mention the scenario is extremely
-	 * unlikely).  It is possible that a second hibernate recovery happens
-	 * much faster than a first, causing the observed TSC here to be
-	 * smaller; this would require additional padding adjustment, which is
-	 * why we set last_host_tsc to the local tsc observed here.
-	 *
-	 * N.B. - this code below runs only on platforms with reliable TSC,
-	 * as that is the only way backwards_tsc is set above.  Also note
-	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
-	 * have the same delta_cyc adjustment applied if backwards_tsc
-	 * is detected.  Note further, this adjustment is only done once,
-	 * as we reset last_host_tsc on all VCPUs to stop this from being
-	 * called multiple times (one for each physical CPU bringup).
-	 *
-	 * Platforms with unreliable TSCs don't have to deal with this, they
-	 * will be compensated by the logic in vcpu_load, which sets the TSC to
-	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
-	 * guarantee that they stay in perfect synchronization.
-	 */
-	if (backwards_tsc) {
-		u64 delta_cyc = max_tsc - local_tsc;
-		list_for_each_entry(kvm, &vm_list, vm_list) {
-			kvm->arch.backwards_tsc_observed = true;
-			kvm_for_each_vcpu(i, vcpu, kvm) {
-				vcpu->arch.tsc_offset_adjustment += delta_cyc;
-				vcpu->arch.last_host_tsc = local_tsc;
-				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
-			}
-
-			/*
-			 * We have to disable TSC offset matching.. if you were
-			 * booting a VM while issuing an S4 host suspend....
-			 * you may have some problem.  Solving this issue is
-			 * left as an exercise to the reader.
-			 */
-			kvm->arch.last_tsc_nsec = 0;
-			kvm->arch.last_tsc_write = 0;
-		}
-
-	}
-	return 0;
-}
-
-void kvm_arch_hardware_disable(void)
-{
-	static_call(kvm_x86_hardware_disable)();
-	drop_user_return_notifiers();
-}
-
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1da8efcd3e9c..17ff3917b9a8 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -40,8 +40,6 @@ struct kvm_caps {
 	u64 supported_perf_cap;
 };
 
-void kvm_spurious_fault(void);
-
 #define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
 ({									\
 	bool failed = (consistency_check);				\
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f0afe549c0d6..d26671682764 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1467,9 +1467,11 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
 #endif
 
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+#ifndef CONFIG_X86
 int kvm_arch_hardware_enable(void);
 void kvm_arch_hardware_disable(void);
 #endif
+#endif
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/vac.h b/virt/kvm/vac.h
index f3e7b08168df..b5159fa3f18d 100644
--- a/virt/kvm/vac.h
+++ b/virt/kvm/vac.h
@@ -13,6 +13,11 @@ int kvm_offline_cpu(unsigned int cpu);
 void hardware_disable_all(void);
 int hardware_enable_all(void);
 
+#ifdef CONFIG_X86
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
+#endif
+
 extern struct notifier_block kvm_reboot_notifier;
 
 extern struct syscore_ops kvm_syscore_ops;
-- 
2.42.0.869.gea05f2083d-goog
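The core of the change above, side by side: x86.c used to reach the vendor's
hardware hooks through kvm_x86_ops via static_call(), and the VAC now calls
them directly based on which vendor is supported. Since hardware enable and
disable are slow paths (CPU hotplug, module load), the extra vendor check is
harmless. A standalone sketch of the two shapes, not kernel code; the stub
bodies and the "VMX supported" answer are invented for illustration, and only
the dispatch pattern mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

static bool kvm_is_vmx_supported(void) { return true;  } /* stub */
static bool kvm_is_svm_supported(void) { return false; } /* stub */

static int vmx_hardware_enable(void) { puts("VMX enabled"); return 0; }
static int svm_hardware_enable(void) { puts("SVM enabled"); return 0; }

/* Old shape: one indirect slot per vendor, filled in at init time. */
struct kvm_x86_ops_sketch {
	int (*hardware_enable)(void);
};

/* New shape, as in kvm_arch_hardware_enable(): branch on the vendor. */
static int arch_hardware_enable_sketch(void)
{
	int ret = -1; /* stands in for -EIO */

	if (kvm_is_vmx_supported())
		ret = vmx_hardware_enable();
	else if (kvm_is_svm_supported())
		ret = svm_hardware_enable();
	return ret;
}

int main(void)
{
	struct kvm_x86_ops_sketch old_ops = {
		.hardware_enable = vmx_hardware_enable,
	};

	old_ops.hardware_enable();            /* before: indirect call */
	return arch_hardware_enable_sketch(); /* after: direct dispatch */
}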
From nobody Wed Dec 31 04:49:14 2025
Date: Tue, 7 Nov 2023 20:20:02 +0000
In-Reply-To: <20231107202002.667900-1-aghulati@google.com>
References: <20231107202002.667900-1-aghulati@google.com>
Message-ID: <20231107202002.667900-15-aghulati@google.com>
Subject: [RFC PATCH 14/14] KVM: VAC: Bring up VAC as a new module
From: Anish Ghulati
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson, Paolo Bonzini, Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86@kernel.org, hpa@zytor.com, Vitaly Kuznetsov, peterz@infradead.org, paulmck@kernel.org, Mark Rutland
Cc: Anish Ghulati

Add Kconfig options to build VAC as a new module, and make KVM depend on
this new "base" module.

Add another Kconfig option, KVM_ID, to accept a postfix ID for multi-KVM.
This option appends a postfix to the KVM module and character device names
so that multiple KVMs running on the same host can be told apart. It
defaults to the empty string, which preserves the existing names.

Opportunistically fix the indentation in the Makefile.

Resolve earlier TODOs by moving the vac_vmx/svm_init/exit() calls into
VAC's module_init() and module_exit() functions. Export the VAC data and
functions that KVM still needs, and make the functions that are no longer
needed outside VAC private.

TODO: Fix the module name, which is currently set to kvm-vac.
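As a concrete illustration (the value "2" below is hypothetical; the option
defaults to the empty string, which keeps today's plain "kvm" names): setting
CONFIG_KVM_ID="2" names the module object kvm2.o and the character device
/dev/kvm2, since kvm_init() now builds the device name with
kasprintf(GFP_KERNEL, "kvm%s", CONFIG_KVM_ID). A userspace sketch of the same
composition:

#include <stdio.h>

#define CONFIG_KVM_ID "2" /* hypothetical example value */

int main(void)
{
	char name[32];

	/* Mirrors the kasprintf() call added to kvm_init(). */
	snprintf(name, sizeof(name), "kvm%s", CONFIG_KVM_ID);
	printf("module: %s.ko  device: /dev/%s\n", name, name);
	return 0;
}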
Signed-off-by: Anish Ghulati
---
 arch/x86/kvm/Kconfig      | 17 ++++++++++++++++
 arch/x86/kvm/Makefile     | 29 +++++++++++++++------------
 arch/x86/kvm/svm/svm.c    |  3 ---
 arch/x86/kvm/svm/vac.c    |  8 +++++++-
 arch/x86/kvm/vac.c        | 42 +++++++++++++++++++++++++++++++++++++--
 arch/x86/kvm/vac.h        |  2 --
 arch/x86/kvm/vmx/nested.c |  5 +++--
 arch/x86/kvm/vmx/vac.c    | 25 ++++++++++++++++++++---
 arch/x86/kvm/vmx/vac.h    |  2 --
 arch/x86/kvm/vmx/vmx.c    | 14 +++++--------
 arch/x86/kvm/x86.c        |  7 -------
 virt/kvm/Makefile.kvm     | 15 +++++++------
 virt/kvm/kvm_main.c       |  7 ++++---
 virt/kvm/vac.c            |  7 +++++++
 14 files changed, 128 insertions(+), 55 deletions(-)

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index adfa57d59643..42a0a0107572 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -17,10 +17,19 @@ menuconfig VIRTUALIZATION
 
 if VIRTUALIZATION
 
+config VAC
+	tristate "Virtualization Acceleration Component (VAC)"
+	select KVM_GENERIC_HARDWARE_ENABLING
+	help
+	  Support running multiple KVM modules on the same host. If VAC is not
+	  selected to run as a separate module, it will run as part of KVM, and
+	  the system will only support a single KVM.
+
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HIGH_RES_TIMERS
 	depends on X86_LOCAL_APIC
+	depends on VAC
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select HAVE_KVM_IRQCHIP
@@ -60,6 +69,14 @@ config KVM
 
 	  If unsure, say N.
 
+config KVM_ID
+	string "KVM: Postfix ID for multi-KVM"
+	depends on KVM
+	default ""
+	help
+	  This is the postfix string to append to the KVM module and
+	  character device to differentiate multiple KVM builds.
+
 config KVM_WERROR
 	bool "Compile KVM with -Werror"
 	# KASAN may cause the build to fail due to larger frames
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b3de4bd7988f..48d263ecaffa 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -8,33 +8,36 @@ endif
 
 include $(srctree)/virt/kvm/Makefile.kvm
 
-kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
+kvm-vac-y := $(KVM)/vac.o vac.o
+kvm-vac-$(CONFIG_KVM_INTEL) += vmx/vac.o
+kvm-vac-$(CONFIG_KVM_AMD) += svm/vac.o
+
+kvm$(CONFIG_KVM_ID)-y += x86.o emulate.o i8259.o irq.o lapic.o \
 	i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
 	hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
 	mmu/spte.o
 
-kvm-y += vac.o vmx/vac.o svm/vac.o
-
 ifdef CONFIG_HYPERV
-kvm-y += kvm_onhyperv.o
+kvm$(CONFIG_KVM_ID)-y += kvm_onhyperv.o
 endif
 
-kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
-kvm-$(CONFIG_KVM_XEN) += xen.o
-kvm-$(CONFIG_KVM_SMM) += smm.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_XEN) += xen.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_SMM) += smm.o
 
-kvm-$(CONFIG_KVM_INTEL) += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
-			   vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
-kvm-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_INTEL) += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
+			   vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
 
-kvm-$(CONFIG_KVM_AMD) += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
-			 svm/sev.o svm/hyperv.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_AMD) += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
+			 svm/sev.o svm/hyperv.o
 
 ifdef CONFIG_HYPERV
 kvm-$(CONFIG_KVM_AMD) += svm/svm_onhyperv.o
 endif
 
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_VAC) += kvm-vac.o
+obj-$(CONFIG_KVM) += kvm$(CONFIG_KVM_ID).o
 
 AFLAGS_svm/vmenter.o := -iquote $(obj)
 $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fb2c72430c7a..6b9f81fc84db 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5166,9 +5166,6 @@ int __init svm_init(void)
 	if (r)
 		return r;
 
-	//TODO: Remove this init call once VAC is a module
-	vac_svm_init();
-
 	/*
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
diff --git a/arch/x86/kvm/svm/vac.c b/arch/x86/kvm/svm/vac.c
index 7c4db99ca7d5..37ad2a3a9d2d 100644
--- a/arch/x86/kvm/svm/vac.c
+++ b/arch/x86/kvm/svm/vac.c
@@ -8,7 +8,10 @@
 #include "vac.h"
 
 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
+EXPORT_SYMBOL_GPL(svm_data);
+
 unsigned int max_sev_asid;
+EXPORT_SYMBOL_GPL(max_sev_asid);
 
 static bool __kvm_is_svm_supported(void)
 {
@@ -52,6 +55,7 @@ bool kvm_is_svm_supported(void)
 
 	return supported;
 }
+EXPORT_SYMBOL_GPL(kvm_is_svm_supported);
 
 static inline void kvm_cpu_svm_disable(void)
 {
@@ -87,6 +91,7 @@ void svm_hardware_disable(void)
 
 	amd_pmu_disable_virt();
 }
+EXPORT_SYMBOL_GPL(svm_hardware_disable);
 
 int svm_hardware_enable(void)
 {
@@ -152,6 +157,7 @@ int svm_hardware_enable(void)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(svm_hardware_enable);
 
 int __init vac_svm_init(void)
 {
@@ -160,7 +166,7 @@ int __init vac_svm_init(void)
 	return 0;
 }
 
-void vac_svm_exit(void)
+void __exit vac_svm_exit(void)
 {
 	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
 }
diff --git a/arch/x86/kvm/vac.c b/arch/x86/kvm/vac.c
index 79f5c2ac159a..2d7ca59b4c90 100644
--- a/arch/x86/kvm/vac.c
+++ b/arch/x86/kvm/vac.c
@@ -2,13 +2,18 @@
 
 #include "vac.h"
 #include
+#include
+
+MODULE_LICENSE("GPL");
 
 extern bool kvm_rebooting;
 
 u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
 struct kvm_user_return_msrs __percpu *user_return_msrs;
+EXPORT_SYMBOL_GPL(user_return_msrs);
 
 u32 __read_mostly kvm_nr_uret_msrs;
+EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
 
 void kvm_on_user_return(struct user_return_notifier *urn)
 {
@@ -85,6 +90,7 @@ int kvm_add_user_return_msr(u32 msr)
 	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
 	return kvm_nr_uret_msrs++;
 }
+EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
 
 int kvm_find_user_return_msr(u32 msr)
 {
@@ -96,6 +102,7 @@ int kvm_find_user_return_msr(u32 msr)
 	}
 	return -1;
 }
+EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
 
 int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask)
 {
@@ -118,6 +125,7 @@ int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask)
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
 
 int kvm_arch_hardware_enable(void)
 {
@@ -158,8 +166,9 @@ noinstr void kvm_spurious_fault(void)
 	/* Fault while not rebooting.  We want the trace. */
 	BUG_ON(!kvm_rebooting);
 }
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
-int kvm_alloc_user_return_msrs(void)
+static int kvm_alloc_user_return_msrs(void)
 {
 	user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
 	if (!user_return_msrs) {
@@ -170,7 +179,36 @@ int kvm_alloc_user_return_msrs(void)
 	return 0;
 }
 
-void kvm_free_user_return_msrs(void)
+static void kvm_free_user_return_msrs(void)
 {
 	free_percpu(user_return_msrs);
 }
+
+int __init vac_init(void)
+{
+	int r = 0;
+
+	r = kvm_alloc_user_return_msrs();
+	if (r)
+		goto out_user_return_msrs;
+
+	if (kvm_is_vmx_supported())
+		r = vac_vmx_init();
+	else if (kvm_is_svm_supported())
+		r = vac_svm_init();
+
+out_user_return_msrs:
+	return r;
+}
+module_init(vac_init);
+
+void __exit vac_exit(void)
+{
+	if (kvm_is_vmx_supported())
+		vac_vmx_exit();
+	else if (kvm_is_svm_supported())
+		vac_svm_exit();
+
+	kvm_free_user_return_msrs();
+}
+module_exit(vac_exit);
diff --git a/arch/x86/kvm/vac.h b/arch/x86/kvm/vac.h
index daf1f137d196..a40e5309ec5f 100644
--- a/arch/x86/kvm/vac.h
+++ b/arch/x86/kvm/vac.h
@@ -56,8 +56,6 @@ struct kvm_user_return_msrs {
 
 extern u32 __read_mostly kvm_nr_uret_msrs;
 
-int kvm_alloc_user_return_msrs(void);
-void kvm_free_user_return_msrs(void);
 int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 5c6ac7662453..c4999b4cf257 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -307,7 +307,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
 	vmx->nested.vmxon = false;
 	vmx->nested.smm.vmxon = false;
 	vmx->nested.vmxon_ptr = INVALID_GPA;
-	free_vpid(vmx->nested.vpid02);
+	if (enable_vpid)
+		free_vpid(vmx->nested.vpid02);
 	vmx->nested.posted_intr_nv = -1;
 	vmx->nested.current_vmptr = INVALID_GPA;
 	if (enable_shadow_vmcs) {
@@ -5115,7 +5116,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 			     HRTIMER_MODE_ABS_PINNED);
 	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
-	vmx->nested.vpid02 = allocate_vpid();
+	vmx->nested.vpid02 = enable_vpid ? allocate_vpid() : 0;
 
 	vmx->nested.vmcs02_initialized = false;
 	vmx->nested.vmxon = true;
diff --git a/arch/x86/kvm/vmx/vac.c b/arch/x86/kvm/vmx/vac.c
index cdfdeb67a719..e147b9890c99 100644
--- a/arch/x86/kvm/vmx/vac.c
+++ b/arch/x86/kvm/vmx/vac.c
@@ -8,6 +8,10 @@
 #include "vmx_ops.h"
 #include "posted_intr.h"
 
+// TODO: Move these to VAC
+void vmclear_error(struct vmcs *vmcs, u64 phys_addr) {}
+void invept_error(unsigned long ext, u64 eptp, gpa_t gpa) {}
+
 /*
  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
@@ -17,16 +21,19 @@ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 
 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+EXPORT_SYMBOL_GPL(current_vmcs);
 
 void vac_set_vmxarea(struct vmcs *vmcs, int cpu)
 {
 	per_cpu(vmxarea, cpu) = vmcs;
 }
+EXPORT_SYMBOL_GPL(vac_set_vmxarea);
 
 struct vmcs *vac_get_vmxarea(int cpu)
 {
 	return per_cpu(vmxarea, cpu);
 }
+EXPORT_SYMBOL_GPL(vac_get_vmxarea);
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -59,6 +66,7 @@ bool kvm_is_vmx_supported(void)
 
 	return supported;
 }
+EXPORT_SYMBOL_GPL(kvm_is_vmx_supported);
 
 int allocate_vpid(void)
 {
@@ -75,6 +83,7 @@ int allocate_vpid(void)
 	spin_unlock(&vmx_vpid_lock);
 	return vpid;
 }
+EXPORT_SYMBOL_GPL(allocate_vpid);
 
 void free_vpid(int vpid)
 {
@@ -84,6 +93,7 @@ void free_vpid(int vpid)
 	__clear_bit(vpid, vmx_vpid_bitmap);
 	spin_unlock(&vmx_vpid_lock);
 }
+EXPORT_SYMBOL_GPL(free_vpid);
 
 void add_vmcs_to_loaded_vmcss_on_cpu(
 		struct list_head *loaded_vmcss_on_cpu_link,
@@ -91,6 +101,7 @@ void add_vmcs_to_loaded_vmcss_on_cpu(
 {
 	list_add(loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu));
 }
+EXPORT_SYMBOL_GPL(add_vmcs_to_loaded_vmcss_on_cpu);
 
 static void __loaded_vmcs_clear(void *arg)
 {
@@ -130,6 +141,7 @@ void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
 			 __loaded_vmcs_clear, loaded_vmcs, 1);
 
 }
+EXPORT_SYMBOL_GPL(loaded_vmcs_clear);
 
 static int kvm_cpu_vmxon(u64 vmxon_pointer)
 {
@@ -175,11 +187,16 @@ int vmx_hardware_enable(void)
 		return r;
 	}
 
-	if (enable_ept)
+	// TODO: VAC: Since we can have a mix of KVMs with enable_ept=0 and =1,
+	// we need to perform a global INVEPT here.
+	// TODO: Check for the
+	// vmx_capability invept bit before executing this.
+	if (1)
 		ept_sync_global();
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vmx_hardware_enable);
 
 static void vmclear_local_loaded_vmcss(void)
 {
@@ -246,6 +263,7 @@ void vmx_hardware_disable(void)
 
 	intel_pt_handle_vmx(0);
 }
+EXPORT_SYMBOL_GPL(vmx_hardware_disable);
 
 int __init vac_vmx_init(void)
 {
@@ -254,7 +272,8 @@ int __init vac_vmx_init(void)
 	for_each_possible_cpu(cpu) {
 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
 
-		pi_init_cpu(cpu);
+		// TODO: Move posted interrupts list to VAC
+		// pi_init_cpu(cpu);
 	}
 
 	cpu_emergency_register_virt_callback(vmx_emergency_disable);
@@ -262,7 +281,7 @@ int __init vac_vmx_init(void)
 	return 0;
 }
 
-void vac_vmx_exit(void)
+void __exit vac_vmx_exit(void)
 {
 	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
 }
diff --git a/arch/x86/kvm/vmx/vac.h b/arch/x86/kvm/vmx/vac.h
index d5af0ca67e3f..991df7ad9d81 100644
--- a/arch/x86/kvm/vmx/vac.h
+++ b/arch/x86/kvm/vmx/vac.h
@@ -16,7 +16,5 @@ void add_vmcs_to_loaded_vmcss_on_cpu(
 		struct list_head *loaded_vmcss_on_cpu_link,
 		int cpu);
 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
-int vmx_hardware_enable(void);
-void vmx_hardware_disable(void);
 
 #endif // ARCH_X86_KVM_VMX_VAC_H
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 69a6a8591996..8d749da61c71 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7201,7 +7201,8 @@ static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
 
 	if (enable_pml)
 		vmx_destroy_pml_buffer(vmx);
-	free_vpid(vmx->vpid);
+	if (enable_vpid)
+		free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
 }
@@ -7219,7 +7220,7 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 
 	err = -ENOMEM;
 
-	vmx->vpid = allocate_vpid();
+	vmx->vpid = enable_vpid ? allocate_vpid() : 0;
 
 	/*
 	 * If PML is turned on, failure on enabling PML just results in failure
@@ -7308,7 +7309,8 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 free_pml:
 	vmx_destroy_pml_buffer(vmx);
 free_vpid:
-	free_vpid(vmx->vpid);
+	if (enable_vpid)
+		free_vpid(vmx->vpid);
 	return err;
 }
 
@@ -7992,9 +7994,6 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	//TODO: Remove this exit call once VAC is a module
-	vac_vmx_exit();
-
 	vmx_cleanup_l1d_flush();
 }
 
@@ -8436,9 +8435,6 @@ int __init vmx_init(void)
 	if (r)
 		goto err_l1d_flush;
 
-	//TODO: Remove this init call once VAC is a module
-	vac_vmx_init();
-
 	vmx_check_vmcs12_offsets();
 
 	/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a74139061e4d..57b5bee2d484 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9274,10 +9274,6 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 		return -ENOMEM;
 	}
 
-	r = kvm_alloc_user_return_msrs();
-	if (r)
-		goto out_free_x86_emulator_cache;
-
 	r = kvm_mmu_vendor_module_init();
 	if (r)
 		goto out_free_percpu;
@@ -9354,8 +9350,6 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
 out_free_percpu:
-	kvm_free_user_return_msrs();
-out_free_x86_emulator_cache:
 	kmem_cache_destroy(x86_emulator_cache);
 	return r;
 }
@@ -9393,7 +9387,6 @@ void kvm_x86_vendor_exit(void)
 #endif
 	static_call(kvm_x86_hardware_unsetup)();
 	kvm_mmu_vendor_module_exit();
-	kvm_free_user_return_msrs();
 	kmem_cache_destroy(x86_emulator_cache);
 #ifdef CONFIG_KVM_XEN
 	static_key_deferred_flush(&kvm_xen_enabled);
diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm
index 7876021ea4d7..f1ad75797fe8 100644
--- a/virt/kvm/Makefile.kvm
+++ b/virt/kvm/Makefile.kvm
@@ -7,13 +7,12 @@ ccflags-y += -I$(srctree)/$(src) -D__KVM__
 
 KVM ?= ../../../virt/kvm
 
-kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
+kvm$(CONFIG_KVM_ID)-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
 ifdef CONFIG_VFIO
-kvm-y += $(KVM)/vfio.o
+kvm$(CONFIG_KVM_ID)-y += $(KVM)/vfio.o
 endif
-kvm-y += $(KVM)/vac.o
-kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
-kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
-kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
-kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
-kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
+kvm$(CONFIG_KVM_ID)-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 575f044fd842..c4af06b1e62c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5163,9 +5163,8 @@ static struct file_operations kvm_chardev_ops = {
 };
 
 static struct miscdevice kvm_dev = {
-	KVM_MINOR,
-	"kvm",
-	&kvm_chardev_ops,
+	.minor = KVM_MINOR,
+	.fops = &kvm_chardev_ops,
 };
 
 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
@@ -5914,7 +5913,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
 	/*
 	 * Registration _must_ be the very last thing done, as this exposes
 	 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
+	 * Append CONFIG_KVM_ID to the device name.
 	 */
+	kvm_dev.name = kasprintf(GFP_KERNEL, "kvm%s", CONFIG_KVM_ID);
 	r = misc_register(&kvm_dev);
 	if (r) {
 		pr_err("kvm: misc device register failed\n");
diff --git a/virt/kvm/vac.c b/virt/kvm/vac.c
index c628afeb3d4b..60f5bec2659a 100644
--- a/virt/kvm/vac.c
+++ b/virt/kvm/vac.c
@@ -10,6 +10,7 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
 EXPORT_SYMBOL(cpu_kick_mask);
 
 DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
+EXPORT_SYMBOL_GPL(kvm_running_vcpu);
 
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
 DEFINE_MUTEX(vac_lock);
@@ -56,6 +57,7 @@ int kvm_online_cpu(unsigned int cpu)
 	mutex_unlock(&vac_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(kvm_online_cpu);
 
 static void hardware_disable_nolock(void *junk)
 {
@@ -79,6 +81,7 @@ int kvm_offline_cpu(unsigned int cpu)
 	mutex_unlock(&vac_lock);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_offline_cpu);
 
 static void hardware_disable_all_nolock(void)
 {
@@ -97,6 +100,7 @@ void hardware_disable_all(void)
 	mutex_unlock(&vac_lock);
 	cpus_read_unlock();
 }
+EXPORT_SYMBOL_GPL(hardware_disable_all);
 
 int hardware_enable_all(void)
 {
@@ -129,6 +133,7 @@ int hardware_enable_all(void)
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(hardware_enable_all);
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
@@ -176,10 +181,12 @@ struct notifier_block kvm_reboot_notifier = {
 	.notifier_call = kvm_reboot,
 	.priority = 0,
 };
+EXPORT_SYMBOL_GPL(kvm_reboot_notifier);
 
 struct syscore_ops kvm_syscore_ops = {
 	.suspend = kvm_suspend,
 	.resume = kvm_resume,
 };
+EXPORT_SYMBOL_GPL(kvm_syscore_ops);
 
 #endif
-- 
2.42.0.869.gea05f2083d-goog
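One pattern in the VMX changes above deserves a note: every allocate_vpid()
and free_vpid() call is now guarded by enable_vpid, so a KVM build that runs
with VPIDs disabled never touches the vpid bitmap that the VAC shares between
KVM instances. A standalone sketch of the guard (the stubs are invented for
illustration; only the gating mirrors the patch):

#include <stdbool.h>
#include <stdio.h>

static bool enable_vpid = true;

static int vpid_counter = 1;
static int allocate_vpid(void) { return vpid_counter++; } /* stub */
static void free_vpid(int vpid) { (void)vpid; }           /* stub */

int main(void)
{
	/* create: only take a VPID from the shared pool when enabled;
	 * vpid 0 conventionally means "no VPID in use". */
	int vpid = enable_vpid ? allocate_vpid() : 0;

	printf("vpid = %d\n", vpid);

	/* destroy: only return the VPID when one was actually taken */
	if (enable_vpid)
		free_vpid(vpid);
	return 0;
}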