Drop the unused shift number, and reposition the constants into the cleaned-up
section. Rename K8_VMCR_SVME_DISABLE to VM_CR_SVM_DISABLE, to be closer to its APM definition.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
This is cleanup to help a forthcoming Trenchboot change, which will use more
bits in the MSR.
---
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/include/asm-x86/msr-index.h | 8 +++-----
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 0854fcfc14..b819897a4a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1586,7 +1586,7 @@ static int _svm_cpu_up(bool bsp)
/* Check whether SVM feature is disabled in BIOS */
rdmsrl(MSR_K8_VM_CR, msr_content);
- if ( msr_content & K8_VMCR_SVME_DISABLE )
+ if ( msr_content & VM_CR_SVM_DISABLE )
{
printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
return -EINVAL;
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 3e0c6c8476..ff583cf0ed 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -116,6 +116,9 @@
#define PASID_PASID_MASK 0x000fffff
#define PASID_VALID (_AC(1, ULL) << 31)
+#define MSR_K8_VM_CR 0xc0010114
+#define VM_CR_SVM_DISABLE (_AC(1, ULL) << 4)
+
/*
* Legacy MSR constants in need of cleanup. No new MSRs below this comment.
*/
@@ -297,7 +300,6 @@
#define MSR_K8_PSTATE6 0xc001006A
#define MSR_K8_PSTATE7 0xc001006B
#define MSR_K8_ENABLE_C1E 0xc0010055
-#define MSR_K8_VM_CR 0xc0010114
#define MSR_K8_VM_HSAVE_PA 0xc0010117
#define MSR_AMD_FAM15H_EVNTSEL0 0xc0010200
@@ -318,10 +320,6 @@
#define MSR_K8_FEATURE_MASK 0xc0011004
#define MSR_K8_EXT_FEATURE_MASK 0xc0011005
-/* MSR_K8_VM_CR bits: */
-#define _K8_VMCR_SVME_DISABLE 4
-#define K8_VMCR_SVME_DISABLE (1 << _K8_VMCR_SVME_DISABLE)
-
/* AMD64 MSRs */
#define MSR_AMD64_NB_CFG 0xc001001f
#define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46
--
2.11.0