When SVM is disabled by BIOS, one cannot use KVM but the
SVM feature is still shown in the output of /proc/cpuinfo.
On Intel machines, VMX is cleared by init_ia32_feat_ctl(),
so do the same on AMD and Hygon processors.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/include/asm/msr-index.h | 6 +++++-
arch/x86/include/asm/svm.h | 6 ------
arch/x86/kernel/cpu/amd.c | 10 ++++++++++
arch/x86/kernel/cpu/hygon.c | 10 ++++++++++
arch/x86/kvm/svm/svm.c | 8 --------
5 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d111350197f..6a6b0f763f67 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1112,12 +1112,16 @@
#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-/* AMD-V MSRs */
+/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
/* Hardware Feedback Interface */
#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 19bf955b67e0..fb8366af59da 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -229,10 +229,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
-#define SVM_VM_CR_VALID_MASK 0x001fULL
-#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
-#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
-
#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
@@ -571,8 +567,6 @@ struct vmcb {
#define SVM_CPUID_FUNC 0x8000000a
-#define SVM_VM_CR_SVM_DISABLE 4
-
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dd8379d84445..1011ce20f513 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1031,6 +1031,8 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
static void init_amd(struct cpuinfo_x86 *c)
{
+ u64 vm_cr;
+
early_init_amd(c);
/*
@@ -1082,6 +1084,14 @@ static void init_amd(struct cpuinfo_x86 *c)
init_amd_cacheinfo(c);
+ if (cpu_has(c, X86_FEATURE_SVM)) {
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
+ pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
+ clear_cpu_cap(c, X86_FEATURE_SVM);
+ }
+ }
+
if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
/*
* Use LFENCE for execution serialization. On families which
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index defdc594be14..16f34639ecf7 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -290,6 +290,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
static void init_hygon(struct cpuinfo_x86 *c)
{
+ u64 vm_cr;
+
early_init_hygon(c);
/*
@@ -320,6 +322,14 @@ static void init_hygon(struct cpuinfo_x86 *c)
init_hygon_cacheinfo(c);
+ if (cpu_has(c, X86_FEATURE_SVM)) {
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
+ pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
+ clear_cpu_cap(c, X86_FEATURE_SVM);
+ }
+ }
+
if (cpu_has(c, X86_FEATURE_XMM2)) {
/*
* Use LFENCE for execution serialization. On families which
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f283eb47f6ac..7b91efb72ea6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -531,8 +531,6 @@ static bool __kvm_is_svm_supported(void)
int cpu = smp_processor_id();
struct cpuinfo_x86 *c = &cpu_data(cpu);
- u64 vm_cr;
-
if (c->x86_vendor != X86_VENDOR_AMD &&
c->x86_vendor != X86_VENDOR_HYGON) {
pr_err("CPU %d isn't AMD or Hygon\n", cpu);
@@ -549,12 +547,6 @@ static bool __kvm_is_svm_supported(void)
return false;
}
- rdmsrl(MSR_VM_CR, vm_cr);
- if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
- pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
- return false;
- }
-
return true;
}
--
2.39.1
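For reference, the Intel-side clearing that the changelog mentions is done by
init_ia32_feat_ctl() in arch/x86/kernel/cpu/feat_ctl.c.  A heavily simplified
sketch of the relevant logic follows (the real function also deals with SGX
and TXT/SMX, and will enable and lock the MSR itself if the BIOS left it
unlocked); here c is the struct cpuinfo_x86 being initialized:

	u64 msr;

	if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
		/* MSR not implemented at all: VMX can never be enabled. */
		clear_cpu_cap(c, X86_FEATURE_VMX);
		return;
	}

	/*
	 * BIOS locked the MSR without enabling VMX outside SMX: the
	 * feature is effectively off, so hide it from /proc/cpuinfo too.
	 */
	if ((msr & FEAT_CTL_LOCKED) &&
	    !(msr & FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX))
		clear_cpu_cap(c, X86_FEATURE_VMX);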
On Thu, Sep 21, 2023, Paolo Bonzini wrote:
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index f283eb47f6ac..7b91efb72ea6 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -531,8 +531,6 @@ static bool __kvm_is_svm_supported(void)
> int cpu = smp_processor_id();
> struct cpuinfo_x86 *c = &cpu_data(cpu);
>
> - u64 vm_cr;
> -
> if (c->x86_vendor != X86_VENDOR_AMD &&
> c->x86_vendor != X86_VENDOR_HYGON) {
> pr_err("CPU %d isn't AMD or Hygon\n", cpu);
> @@ -549,12 +547,6 @@ static bool __kvm_is_svm_supported(void)
> return false;
> }
Hidden in here is
	if (!cpu_has(c, X86_FEATURE_SVM)) {
		pr_err("SVM not supported by CPU %d\n", cpu);
		return false;
	}
which will be technically wrong and potentially misleading when SVM is disabled
by BIOS, but supported by the CPU. We should do the same thing that VMX does
and manually query CPUID to check "is SVM supported", and then rely on cpu_has()
for the "is SVM supported _and_ enabled". E.g.
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f283eb47f6ac..9bcd8aad28d7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -539,22 +539,21 @@ static bool __kvm_is_svm_supported(void)
return false;
}
- if (!cpu_has(c, X86_FEATURE_SVM)) {
+ if (!(cpuid_ecx(0x80000001) & feature_bit(SVM))) {
pr_err("SVM not supported by CPU %d\n", cpu);
return false;
}
+ if (!cpu_has(c, X86_FEATURE_SVM)) {
+ pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
+ return false;
+ }
+
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
pr_info("KVM is unsupported when running as an SEV guest\n");
return false;
}
- rdmsrl(MSR_VM_CR, vm_cr);
- if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
- pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
- return false;
- }
-
return true;
}
The following commit has been merged into the x86/cpu branch of tip:
Commit-ID: 7deda2ce5b33edc6d689e429e3fe75382468b030
Gitweb: https://git.kernel.org/tip/7deda2ce5b33edc6d689e429e3fe75382468b030
Author: Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Thu, 21 Sep 2023 07:49:40 -04:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 22 Sep 2023 10:55:26 +02:00
x86/cpu: Clear SVM feature if disabled by BIOS
When SVM is disabled by BIOS, one cannot use KVM but the
SVM feature is still shown in the output of /proc/cpuinfo.
On Intel machines, VMX is cleared by init_ia32_feat_ctl(),
so do the same on AMD and Hygon processors.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230921114940.957141-1-pbonzini@redhat.com
---
arch/x86/include/asm/msr-index.h | 6 +++++-
arch/x86/include/asm/svm.h | 6 ------
arch/x86/kernel/cpu/amd.c | 10 ++++++++++
arch/x86/kernel/cpu/hygon.c | 10 ++++++++++
arch/x86/kvm/svm/svm.c | 8 --------
5 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d11135..6a6b0f7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1112,12 +1112,16 @@
#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-/* AMD-V MSRs */
+/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
/* Hardware Feedback Interface */
#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 19bf955..fb8366a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -229,10 +229,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
-#define SVM_VM_CR_VALID_MASK 0x001fULL
-#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
-#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
-
#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
@@ -571,8 +567,6 @@ struct vmcb {
#define SVM_CPUID_FUNC 0x8000000a
-#define SVM_VM_CR_SVM_DISABLE 4
-
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dd8379d..1011ce2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1031,6 +1031,8 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
static void init_amd(struct cpuinfo_x86 *c)
{
+ u64 vm_cr;
+
early_init_amd(c);
/*
@@ -1082,6 +1084,14 @@ static void init_amd(struct cpuinfo_x86 *c)
init_amd_cacheinfo(c);
+ if (cpu_has(c, X86_FEATURE_SVM)) {
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
+ pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
+ clear_cpu_cap(c, X86_FEATURE_SVM);
+ }
+ }
+
if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
/*
* Use LFENCE for execution serialization. On families which
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index defdc59..16f3463 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -290,6 +290,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
static void init_hygon(struct cpuinfo_x86 *c)
{
+ u64 vm_cr;
+
early_init_hygon(c);
/*
@@ -320,6 +322,14 @@ static void init_hygon(struct cpuinfo_x86 *c)
init_hygon_cacheinfo(c);
+ if (cpu_has(c, X86_FEATURE_SVM)) {
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
+ pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
+ clear_cpu_cap(c, X86_FEATURE_SVM);
+ }
+ }
+
if (cpu_has(c, X86_FEATURE_XMM2)) {
/*
* Use LFENCE for execution serialization. On families which
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f283eb4..7b91efb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -531,8 +531,6 @@ static bool __kvm_is_svm_supported(void)
int cpu = smp_processor_id();
struct cpuinfo_x86 *c = &cpu_data(cpu);
- u64 vm_cr;
-
if (c->x86_vendor != X86_VENDOR_AMD &&
c->x86_vendor != X86_VENDOR_HYGON) {
pr_err("CPU %d isn't AMD or Hygon\n", cpu);
@@ -549,12 +547,6 @@ static bool __kvm_is_svm_supported(void)
return false;
}
- rdmsrl(MSR_VM_CR, vm_cr);
- if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
- pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
- return false;
- }
-
return true;
}
* tip-bot2 for Paolo Bonzini <tip-bot2@linutronix.de> wrote:
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -1031,6 +1031,8 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
>
> static void init_amd(struct cpuinfo_x86 *c)
> {
> + u64 vm_cr;
> +
> early_init_amd(c);
>
> /*
> @@ -1082,6 +1084,14 @@ static void init_amd(struct cpuinfo_x86 *c)
>
> init_amd_cacheinfo(c);
>
> + if (cpu_has(c, X86_FEATURE_SVM)) {
> + rdmsrl(MSR_VM_CR, vm_cr);
> + if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
> + pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
> + clear_cpu_cap(c, X86_FEATURE_SVM);
> + }
> + }
> +
> if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
> /*
> * Use LFENCE for execution serialization. On families which
> diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
> index defdc59..16f3463 100644
> --- a/arch/x86/kernel/cpu/hygon.c
> +++ b/arch/x86/kernel/cpu/hygon.c
> @@ -290,6 +290,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
>
> static void init_hygon(struct cpuinfo_x86 *c)
> {
> + u64 vm_cr;
> +
> early_init_hygon(c);
>
> /*
> @@ -320,6 +322,14 @@ static void init_hygon(struct cpuinfo_x86 *c)
>
> init_hygon_cacheinfo(c);
>
> + if (cpu_has(c, X86_FEATURE_SVM)) {
> + rdmsrl(MSR_VM_CR, vm_cr);
> + if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
> + pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
> + clear_cpu_cap(c, X86_FEATURE_SVM);
> + }
> + }
> +
1)
It's a bit sad that we are duplicating identical code.
2)
We are doing it in other cases as well: for example nearby_node() is
duplicated between arch/x86/kernel/cpu/amd.c and
arch/x86/kernel/cpu/hygon.c too.
3)
BTW., while looking at this code I noticed that the 'Author' copyright
tag in arch/x86/kernel/cpu/hygon.c seems to be inaccurate:
// SPDX-License-Identifier: GPL-2.0+
/*
* Hygon Processor Support for Linux
*
* Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
*
* Author: Pu Wen <puwen@hygon.cn>
*/
... as for example the nearby_node() was clearly copied & derived from
arch/x86/kernel/cpu/amd.c, which does not appear to be accurately reflected
in this copyright notice?
Thanks,
Ingo
On Fri, Sep 22, 2023 at 12:26 PM Ingo Molnar <mingo@kernel.org> wrote:
> It's a bit sad that we are duplicating identical code.
> We are doing it in other cases as well: for example nearby_node() is
> duplicated between arch/x86/kernel/cpu/amd.c and
> arch/x86/kernel/cpu/hygon.c too.

It is sad, and yeah, I looked at nearby_node() to see if that was nevertheless
expected.  AMD and Hygon pretend that they are different, and use different
families for what is effectively the same processor, and that's silly.  In fact
back in 2018
(https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg1741155.html)
I complained that if AMD and Hygon are organizing the family numbers so that
there's never going to be a conflict, it makes no sense to have a separate
vendor at all.  Yes, it's not a lot of code, but sooner or later some change
will be applied only to amd.c, because honestly who even thinks of Hygon...

Paolo
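Purely for illustration (not part of the applied patch; the helper name and
placement are invented here), the duplicated check could in principle live in
one shared helper that both init_amd() and init_hygon() call:

/* Hypothetical shared helper, built only from the code in the patch above. */
static void clear_svm_if_bios_disabled(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	if (!cpu_has(c, X86_FEATURE_SVM))
		return;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
		pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
		clear_cpu_cap(c, X86_FEATURE_SVM);
	}
}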
On Fri, Sep 22, 2023 at 01:18:09PM +0200, Paolo Bonzini wrote:
> AMD and Hygon pretend that they are different, and use different families
> for what is effectively the same processor, and that's silly.
Not entirely true. Off and on they come with enablement which is close
but then a bit different, and it is either ifdeffery or they do their own
functions. For simplicity, we've done "do their own functions" as we
cannot test the common code on Hygon.
> ... because honestly who even thinks of Hygon...
There's that too.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Sep 22, 2023 at 12:26:09PM +0200, Ingo Molnar wrote:
> It's a bit sad that we are duplicating identical code.
>
> 2)
>
> We are doing it in other cases as well: for example nearby_node() is
> duplicated between arch/x86/kernel/cpu/amd.c and
> arch/x86/kernel/cpu/hygon.c too.
We could do a unification pass at some point. At the moment it is not worth
the effort, IMO, for only a handful of small functions.
> BTW., while looking at this code I noticed that the 'Author' copyright
> tag in arch/x86/kernel/cpu/hygon.c seems to be inaccurate:
>
> // SPDX-License-Identifier: GPL-2.0+
> /*
> * Hygon Processor Support for Linux
> *
> * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
> *
> * Author: Pu Wen <puwen@hygon.cn>
> */
>
> ... as for example the nearby_node() was clearly copied & derived from
> arch/x86/kernel/cpu/amd.c, which does not appear to be accurately reflected
> in this copyright notice?
Perhaps it should say "copied from amd.c and adjusted" or so. That whole
file has pretty much copied parts of amd.c AFAICT.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette