Add "do no harm" testing of EFER, CR0, CR3, CR4, and CR8 for SEV+ guests to
verify that the guest can read and write the registers, without hitting
e.g. a #VC on SEV-ES guests due to KVM incorrectly trying to intercept a
register.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
.../selftests/kvm/include/x86/processor.h | 23 ++++++++++++++
.../selftests/kvm/x86/sev_smoke_test.c | 30 +++++++++++++++++++
2 files changed, 53 insertions(+)
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 4ebae4269e68..469a22122157 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -557,6 +557,11 @@ static inline uint64_t get_cr0(void)
return cr0;
}
+static inline void set_cr0(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
+}
+
static inline uint64_t get_cr3(void)
{
uint64_t cr3;
@@ -566,6 +571,11 @@ static inline uint64_t get_cr3(void)
return cr3;
}
+static inline void set_cr3(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
+}
+
static inline uint64_t get_cr4(void)
{
uint64_t cr4;
@@ -580,6 +590,19 @@ static inline void set_cr4(uint64_t val)
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
+static inline uint64_t get_cr8(void)
+{
+ uint64_t cr8;
+
+ __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
+ return cr8;
+}
+
+static inline void set_cr8(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
+}
+
static inline void set_idt(const struct desc_ptr *idt_desc)
{
__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 86ad1c7d068f..8bd37a476f15 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -13,6 +13,30 @@
#include "linux/psp-sev.h"
#include "sev.h"
+static void guest_sev_test_msr(uint32_t msr)
+{
+ uint64_t val = rdmsr(msr);
+
+ wrmsr(msr, val);
+ GUEST_ASSERT(val == rdmsr(msr));
+}
+
+#define guest_sev_test_reg(reg) \
+do { \
+ uint64_t val = get_##reg(); \
+ \
+ set_##reg(val); \
+ GUEST_ASSERT(val == get_##reg()); \
+} while (0)
+
+static void guest_sev_test_regs(void)
+{
+ guest_sev_test_msr(MSR_EFER);
+ guest_sev_test_reg(cr0);
+ guest_sev_test_reg(cr3);
+ guest_sev_test_reg(cr4);
+ guest_sev_test_reg(cr8);
+}
#define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
@@ -24,6 +48,8 @@ static void guest_snp_code(void)
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
+ guest_sev_test_regs();
+
wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
vmgexit();
}
@@ -34,6 +60,8 @@ static void guest_sev_es_code(void)
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+ guest_sev_test_regs();
+
/*
* TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
* force "termination" to signal "done" via the GHCB MSR protocol.
@@ -47,6 +75,8 @@ static void guest_sev_code(void)
GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+ guest_sev_test_regs();
+
GUEST_DONE();
}
--
2.53.0.473.g4a7958ca14-goog
On 3/11/2026 2:48 AM, Sean Christopherson wrote:
> Add "do no harm" testing of EFER, CR0, CR4, and CR8 for SEV+ guests to
> verify that the guest can read and write the registers, without hitting
> e.g. a #VC on SEV-ES guests due to KVM incorrectly trying to intercept a
> register.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> .../selftests/kvm/include/x86/processor.h | 23 ++++++++++++++
> .../selftests/kvm/x86/sev_smoke_test.c | 30 +++++++++++++++++++
> 2 files changed, 53 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
> index 4ebae4269e68..469a22122157 100644
> --- a/tools/testing/selftests/kvm/include/x86/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86/processor.h
> @@ -557,6 +557,11 @@ static inline uint64_t get_cr0(void)
> return cr0;
> }
>
> +static inline void set_cr0(uint64_t val)
> +{
> + __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
> +}
> +
> static inline uint64_t get_cr3(void)
> {
> uint64_t cr3;
> @@ -566,6 +571,11 @@ static inline uint64_t get_cr3(void)
> return cr3;
> }
>
> +static inline void set_cr3(uint64_t val)
> +{
> + __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
> +}
> +
> static inline uint64_t get_cr4(void)
> {
> uint64_t cr4;
> @@ -580,6 +590,19 @@ static inline void set_cr4(uint64_t val)
> __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
> }
>
> +static inline uint64_t get_cr8(void)
> +{
> + uint64_t cr8;
> +
> + __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
> + return cr8;
> +}
> +
> +static inline void set_cr8(uint64_t val)
> +{
> + __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
> +}
> +
> static inline void set_idt(const struct desc_ptr *idt_desc)
> {
> __asm__ __volatile__("lidt %0"::"m"(*idt_desc));
> diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
> index 86ad1c7d068f..8bd37a476f15 100644
> --- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
> +++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
> @@ -13,6 +13,30 @@
> #include "linux/psp-sev.h"
> #include "sev.h"
>
> +static void guest_sev_test_msr(uint32_t msr)
> +{
> + uint64_t val = rdmsr(msr);
> +
> + wrmsr(msr, val);
> + GUEST_ASSERT(val == rdmsr(msr));
> +}
> +
> +#define guest_sev_test_reg(reg) \
> +do { \
> + uint64_t val = get_##reg(); \
> + \
> + set_##reg(val); \
> + GUEST_ASSERT(val == get_##reg()); \
> +} while (0)
> +
> +static void guest_sev_test_regs(void)
> +{
> + guest_sev_test_msr(MSR_EFER);
> + guest_sev_test_reg(cr0);
> + guest_sev_test_reg(cr3);
> + guest_sev_test_reg(cr4);
> + guest_sev_test_reg(cr8);
> +}
>
> #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
>
> @@ -24,6 +48,8 @@ static void guest_snp_code(void)
> GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
> GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
>
> + guest_sev_test_regs();
> +
> wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
> vmgexit();
> }
> @@ -34,6 +60,8 @@ static void guest_sev_es_code(void)
> GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
> GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
>
> + guest_sev_test_regs();
> +
> /*
> * TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
> * force "termination" to signal "done" via the GHCB MSR protocol.
> @@ -47,6 +75,8 @@ static void guest_sev_code(void)
> GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
> GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
>
> + guest_sev_test_regs();
> +
> GUEST_DONE();
> }
>
Tested this on my machine:
# selftests: kvm: sev_smoke_test
# Random seed: 0x6b8b4567
ok 81 selftests: kvm: sev_smoke_test
Tested-by: Srikanth Aithal <sraithal@amd.com>
© 2016 - 2026 Red Hat, Inc.