When #GP is intercepted by KVM, the #GP interception handler checks
whether the GPA in RAX is legal and reinjects the #GP accordingly.
Otherwise, it calls into the appropriate interception handler for
VMRUN/VMLOAD/VMSAVE. The intercept handlers do not check RAX.
However, according to the APM, the interception takes precedence
over #GP due to an invalid operand:
Generally, instruction intercepts are checked after simple exceptions
(such as #GP—when CPL is incorrect—or #UD) have been checked, but
before exceptions related to memory accesses (such as page faults) and
exceptions based on specific operand values.
Move the check into the interception handlers for VMRUN/VMLOAD/VMSAVE as
the CPU does not check RAX before the interception.
Opportunistically make the non-SVM insn path in gp_interception() do an
early return to reduce indentation.
Signed-off-by: Yosry Ahmed <yosry@kernel.org>
---
arch/x86/kvm/svm/nested.c | 5 +++++
arch/x86/kvm/svm/svm.c | 34 +++++++++++++++++-----------------
2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5ff01d2ac85e4..016bf88ec2def 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1115,6 +1115,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
vmcb12_gpa = svm->vmcb->save.rax;
+ if (!page_address_valid(vcpu, vmcb12_gpa)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
if (ret) {
if (ret == -EFAULT) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 796a6887305d6..f019a3f7705ae 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2183,6 +2183,7 @@ static int intr_interception(struct kvm_vcpu *vcpu)
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ u64 vmcb12_gpa = svm->vmcb->save.rax;
struct vmcb *vmcb12;
struct kvm_host_map map;
int ret;
@@ -2190,7 +2191,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
if (nested_svm_check_permissions(vcpu))
return 1;
- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (!page_address_valid(vcpu, vmcb12_gpa)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
if (ret) {
if (ret == -EINVAL)
kvm_inject_gp(vcpu, 0);
@@ -2306,24 +2312,18 @@ static int gp_interception(struct kvm_vcpu *vcpu)
goto reinject;
opcode = svm_instr_opcode(vcpu);
+ if (opcode != NONE_SVM_INSTR)
+ return emulate_svm_instr(vcpu, opcode);
- if (opcode == NONE_SVM_INSTR) {
- if (!enable_vmware_backdoor)
- goto reinject;
-
- /*
- * VMware backdoor emulation on #GP interception only handles
- * IN{S}, OUT{S}, and RDPMC.
- */
- if (!is_guest_mode(vcpu))
- return kvm_emulate_instruction(vcpu,
- EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
- } else {
- if (!page_address_valid(vcpu, svm->vmcb->save.rax))
- goto reinject;
+ if (!enable_vmware_backdoor)
+ goto reinject;
- return emulate_svm_instr(vcpu, opcode);
- }
+ /*
+ * VMware backdoor emulation on #GP interception only handles
+ * IN{S}, OUT{S}, and RDPMC.
+ */
+ if (!is_guest_mode(vcpu))
+ return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
reinject:
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
--
2.53.0.851.ga537e3e6e9-goog
> @@ -2306,24 +2312,18 @@ static int gp_interception(struct kvm_vcpu *vcpu)
> goto reinject;
>
> opcode = svm_instr_opcode(vcpu);
> + if (opcode != NONE_SVM_INSTR)
> + return emulate_svm_instr(vcpu, opcode);
>
> - if (opcode == NONE_SVM_INSTR) {
> - if (!enable_vmware_backdoor)
> - goto reinject;
> -
> - /*
> - * VMware backdoor emulation on #GP interception only handles
> - * IN{S}, OUT{S}, and RDPMC.
> - */
> - if (!is_guest_mode(vcpu))
> - return kvm_emulate_instruction(vcpu,
> - EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
> - } else {
> - if (!page_address_valid(vcpu, svm->vmcb->save.rax))
> - goto reinject;
> + if (!enable_vmware_backdoor)
> + goto reinject;
>
> - return emulate_svm_instr(vcpu, opcode);
> - }
> + /*
> + * VMware backdoor emulation on #GP interception only handles
> + * IN{S}, OUT{S}, and RDPMC.
> + */
> + if (!is_guest_mode(vcpu))
> + return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
AI review pointed out that we should not drop the page_address_valid()
from here, because if an SVM instruction is executed by L2, and KVM
intercepts the #GP, it should re-inject the #GP into L2 if RAX is
illegal instead of synthesizing a #VMEXIT to L1. My initial instinct
is to keep the check here as well as in the intercept handlers, but
no, L1's intercept should take precedence over #GP due to invalid RAX
anyway. In fact, if L1 has the intercept set, then it must be set in
vmcb02, and KVM would get a #VMEXIT on the intercept not on #GP.
The actual problem is that the current code does not check if L1
actually sets the intercept in emulate_svm_instr(). So if L1 and KVM
do not set the intercept, and RAX is invalid, the current code could
synthesize a spurious #VMEXIT to L1 instead of reinjecting #GP. The
existing check on RAX prevents that, but it doesn't really fix the
problem because if we get #GP due to CPL != 0, we'll still generate a
spurious #VMEXIT to L1. What we really should be doing in
gp_interception() is:
1. if CPL != 0, re-inject #GP.
2. If in guest mode and L1 intercepts the instruction, synthesize a #VMEXIT.
3. Otherwise emulate the instruction, which would take care of
re-injecting the #GP if RAX is invalid with this patch.
Something like this on top (over 2 patches):
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cf5ebdc4b27bf..8942272eb80b2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2237,10 +2237,11 @@ static int emulate_svm_instr(struct kvm_vcpu
*vcpu, int opcode)
[SVM_INSTR_VMLOAD] = vmload_interception,
[SVM_INSTR_VMSAVE] = vmsave_interception,
};
+ int exit_code = guest_mode_exit_codes[opcode];
struct vcpu_svm *svm = to_svm(vcpu);
- if (is_guest_mode(vcpu)) {
- nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
+ if (is_guest_mode(vcpu) &&
vmcb12_is_intercept(&svm->nested.ctl, exit_code))
+ nested_svm_simple_vmexit(svm, exit_code);
return 1;
}
return svm_instr_handlers[opcode](vcpu);
@@ -2269,8 +2270,11 @@ static int gp_interception(struct kvm_vcpu *vcpu)
goto reinject;
opcode = svm_instr_opcode(vcpu);
- if (opcode != NONE_SVM_INSTR)
+ if (opcode != NONE_SVM_INSTR) {
+ if (svm->vmcb->save.cpl)
+ goto reinject;
return emulate_svm_instr(vcpu, opcode);
+ }
if (!enable_vmware_backdoor)
goto reinject;
---
Sean, do you prefer that I send patches separately on top of this
series or a new version with these patches included?
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index cf5ebdc4b27bf..8942272eb80b2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2237,10 +2237,11 @@ static int emulate_svm_instr(struct kvm_vcpu
> *vcpu, int opcode)
> [SVM_INSTR_VMLOAD] = vmload_interception,
> [SVM_INSTR_VMSAVE] = vmsave_interception,
> };
> + int exit_code = guest_mode_exit_codes[opcode];
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - if (is_guest_mode(vcpu)) {
> - nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
> + if (is_guest_mode(vcpu) &&
> vmcb12_is_intercept(&svm->nested.ctl, exit_code))
> + nested_svm_simple_vmexit(svm, exit_code);
No, this is wrong.. well it's incomplete. So we do need to check the
intercept in vmcb12, but, if it's not set, we'll end up with KVM
emulating the instructions through vmload_vmsave_interception(), and
treating RAX as an L1 GPA.
If L1 has VLS enabled though, this is wrong. KVM should treat RAX as an
L2 GPA and run it through the NPT first before using it (e.g. through
translate_nested_gpa()).
Synthesizing a spurious #VMEXIT(VMLOAD/VMSAVE) is definitely better than
letting L2 bypass L1's NPTs and access its memory. So this change is a
net loss. I will drop it from the next version, and this spurious
#VMEXIT can be fixed separately to keep this series focused on fixing
the non-architectural #GPs.
> return 1;
> }
> return svm_instr_handlers[opcode](vcpu);
> @@ -2269,8 +2270,11 @@ static int gp_interception(struct kvm_vcpu *vcpu)
> goto reinject;
>
> opcode = svm_instr_opcode(vcpu);
> - if (opcode != NONE_SVM_INSTR)
> + if (opcode != NONE_SVM_INSTR) {
> + if (svm->vmcb->save.cpl)
> + goto reinject;
> return emulate_svm_instr(vcpu, opcode);
> + }
>
> if (!enable_vmware_backdoor)
> goto reinject;
>
> ---
>
> Sean, do you prefer that I send patches separately on top of this
> series or a new version with these patches included?
On Fri, Mar 13, 2026, Yosry Ahmed wrote:
> > @@ -2306,24 +2312,18 @@ static int gp_interception(struct kvm_vcpu *vcpu)
> > goto reinject;
> >
> > opcode = svm_instr_opcode(vcpu);
> > + if (opcode != NONE_SVM_INSTR)
> > + return emulate_svm_instr(vcpu, opcode);
> >
> > - if (opcode == NONE_SVM_INSTR) {
> > - if (!enable_vmware_backdoor)
> > - goto reinject;
> > -
> > - /*
> > - * VMware backdoor emulation on #GP interception only handles
> > - * IN{S}, OUT{S}, and RDPMC.
> > - */
> > - if (!is_guest_mode(vcpu))
> > - return kvm_emulate_instruction(vcpu,
> > - EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
> > - } else {
> > - if (!page_address_valid(vcpu, svm->vmcb->save.rax))
> > - goto reinject;
> > + if (!enable_vmware_backdoor)
> > + goto reinject;
> >
> > - return emulate_svm_instr(vcpu, opcode);
> > - }
> > + /*
> > + * VMware backdoor emulation on #GP interception only handles
> > + * IN{S}, OUT{S}, and RDPMC.
> > + */
> > + if (!is_guest_mode(vcpu))
> > + return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
>
> AI review pointed out that we should not drop the page_address_valid()
> from here, because if an SVM instruction is executed by L2, and KVM
> intercepts the #GP, it should re-inject the #GP into L2 if RAX is
> illegal instead of synthesizing a #VMEXIT to L1.
No, because the intercept has higher priority than the #GP due to bad RAX.
> My initial instinct is to keep the check here as well as in the intercept
> handlers, but no, L1's intercept should take precedence over #GP due to
> invalid RAX anyway. In fact, if L1 has the intercept set, then it must be set
> in vmcb02, and KVM would get a #VMEXIT on the intercept not on #GP.
Except for the erratum case.
> The actual problem is that the current code does not check if L1
> actually sets the intercept in emulate_svm_instr().
Oh dagnabbit. I had thought about this, multiple times, but wrote it off as a
non-issue because if L1 wanted to intercept VMWHATEVER, KVM would set the intercept
in vmcb02 and would get _that_ instead of a #GP. But the erratum case means that
hardware could have signaled #GP even when the instruction should have been
intercepted.
And I also forgot the KVM could be intercepting #GP for the VMware crud, which
would unintentionally grab the CPL case too. Darn kitchen sink #GPs.
> So if L1 and KVM do not set the intercept, and RAX is invalid, the current
> code could synthesize a spurious #VMEXIT to L1 instead of reinjecting #GP.
> The existing check on RAX prevents that, but it doesn't really fix the
> problem because if we get #GP due to CPL != 0, we'll still generate a
> spurious #VMEXIT to L1. What we really should be doing in gp_interception()
> is:
>
> 1. if CPL != 0, re-inject #GP.
> 2. If in guest mode and L1 intercepts the instruction, synthesize a #VMEXIT.
> 3. Otherwise emulate the instruction, which would take care of
> re-injecting the #GP if RAX is invalid with this patch.
>
> Something like this on top (over 2 patches):
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index cf5ebdc4b27bf..8942272eb80b2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2237,10 +2237,11 @@ static int emulate_svm_instr(struct kvm_vcpu
> *vcpu, int opcode)
> [SVM_INSTR_VMLOAD] = vmload_interception,
> [SVM_INSTR_VMSAVE] = vmsave_interception,
> };
> + int exit_code = guest_mode_exit_codes[opcode];
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - if (is_guest_mode(vcpu)) {
> - nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
> + if (is_guest_mode(vcpu) &&
> vmcb12_is_intercept(&svm->nested.ctl, exit_code))
> + nested_svm_simple_vmexit(svm, exit_code);
> return 1;
> }
> return svm_instr_handlers[opcode](vcpu);
> @@ -2269,8 +2270,11 @@ static int gp_interception(struct kvm_vcpu *vcpu)
> goto reinject;
>
> opcode = svm_instr_opcode(vcpu);
> - if (opcode != NONE_SVM_INSTR)
> + if (opcode != NONE_SVM_INSTR) {
> + if (svm->vmcb->save.cpl)
> + goto reinject;
Don't you need the page_address_valid() check here? Ooooh, no, because either
emulate_svm_instr() will synthesize #VMEXIT, or svm_instr_handlers() will take
care of the #GP. It's only CPL that needs to be checked early, because it has
priority over the #VMEXIT.
> return emulate_svm_instr(vcpu, opcode);
> + }
>
> if (!enable_vmware_backdoor)
> goto reinject;
>
> ---
>
> Sean, do you prefer that I send patches separately on top of this
> series or a new version with these patches included?
Go ahead and send an entirely new series. The less threads I have to chase down
after I get back, the less likely I am to screw things up :-)
> > > + /*
> > > + * VMware backdoor emulation on #GP interception only handles
> > > + * IN{S}, OUT{S}, and RDPMC.
> > > + */
> > > + if (!is_guest_mode(vcpu))
> > > + return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
> >
> > AI review pointed out that we should not drop the page_address_valid()
> > from here, because if an SVM instruction is executed by L2, and KVM
> > intercepts the #GP, it should re-inject the #GP into L2 if RAX is
> > illegal instead of synthesizing a #VMEXIT to L1.
>
> No, because the intercept has higher priority than the #GP due to bad RAX.
Which is literally what I say next :P
>
> > My initial instinct is to keep the check here as well as in the intercept
> > handlers, but no, L1's intercept should take precedence over #GP due to
> > invalid RAX anyway. In fact, if L1 has the intercept set, then it must be set
> > in vmcb02, and KVM would get a #VMEXIT on the intercept not on #GP.
>
> Except for the erratum case.
Yes.
>
> > The actual problem is that the current code does not check if L1
> > actually sets the intercept in emulate_svm_instr().
>
> Oh dagnabbit. I had thought about this, multiple times, but wrote it off as a
> non-issue because if L1 wanted to intercept VMWHATEVER, KVM would set the intercept
> in vmcb02 and would get _that_ instead of a #GP. But the erratum case means that
> hardware could have signaled #GP even when the instruction should have been
> intercepted.
The problem is actually the other way around, it's when L1 does not want
to intercept it. So I think it's a problem regardless of the erratum.
> And I also forgot the KVM could be intercepting #GP for the VMware crud, which
> would unintentionally grab the CPL case too. Darn kitchen sink #GPs.
>
> > So if L1 and KVM do not set the intercept, and RAX is invalid, the current
> > code could synthesize a spurious #VMEXIT to L1 instead of reinjecting #GP.
> > The existing check on RAX prevents that, but it doesn't really fix the
> > problem because if we get #GP due to CPL != 0, we'll still generate a
> > spurious #VMEXIT to L1. What we really should be doing in gp_interception()
> > is:
> >
> > 1. if CPL != 0, re-inject #GP.
> > 2. If in guest mode and L1 intercepts the instruction, synthesize a #VMEXIT.
> > 3. Otherwise emulate the instruction, which would take care of
> > re-injecting the #GP if RAX is invalid with this patch.
> >
> > Something like this on top (over 2 patches):
> >
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index cf5ebdc4b27bf..8942272eb80b2 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -2237,10 +2237,11 @@ static int emulate_svm_instr(struct kvm_vcpu
> > *vcpu, int opcode)
> > [SVM_INSTR_VMLOAD] = vmload_interception,
> > [SVM_INSTR_VMSAVE] = vmsave_interception,
> > };
> > + int exit_code = guest_mode_exit_codes[opcode];
> > struct vcpu_svm *svm = to_svm(vcpu);
> >
> > - if (is_guest_mode(vcpu)) {
> > - nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
> > + if (is_guest_mode(vcpu) &&
> > vmcb12_is_intercept(&svm->nested.ctl, exit_code))
> > + nested_svm_simple_vmexit(svm, exit_code);
> > return 1;
> > }
> > return svm_instr_handlers[opcode](vcpu);
> > @@ -2269,8 +2270,11 @@ static int gp_interception(struct kvm_vcpu *vcpu)
> > goto reinject;
> >
> > opcode = svm_instr_opcode(vcpu);
> > - if (opcode != NONE_SVM_INSTR)
> > + if (opcode != NONE_SVM_INSTR) {
> > + if (svm->vmcb->save.cpl)
> > + goto reinject;
>
> Don't you need the page_address_valid() check here? Ooooh, no, because either
> emulate_svm_instr() will synthesize #VMEXIT, or svm_instr_handlers() will take
> care of the #GP. It's only CPL that needs to be checked early, because it has
> priority over the #VMEXIT.
Yeah, exactly my thought process.
>
> > return emulate_svm_instr(vcpu, opcode);
> > + }
> >
> > if (!enable_vmware_backdoor)
> > goto reinject;
> >
> > ---
> >
> > Sean, do you prefer that I send patches separately on top of this
> > series or a new version with these patches included?
>
> Go ahead and send an entirely new series. The less threads I have to chase down
> after I get back, the less likely I am to screw things up :-)
I will send one next week.
I might also add a patch at the end cleaning up all of this
svm_instr_opcode() and emulate_svm_instr() stuff. The code is
unnecessarily convoluted, we get the opcode in one place then key off of
it in another.
I think it would be nicer with a single helper to handle SVM
instructions, and that would create a good spot to add a comment about
precedence ordering. Something like this:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a0dacbeaa3c5a..d5afcb179398b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2235,54 +2235,42 @@ static int vmrun_interception(struct kvm_vcpu *vcpu)
return nested_svm_vmrun(vcpu);
}
-enum {
- NONE_SVM_INSTR,
- SVM_INSTR_VMRUN,
- SVM_INSTR_VMLOAD,
- SVM_INSTR_VMSAVE,
-};
-
-/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
-static int svm_instr_opcode(struct kvm_vcpu *vcpu)
+static bool check_emulate_svm_instr(struct kvm_vcpu *vcpu, int *ret)
{
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ int exit_code;
if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
- return NONE_SVM_INSTR;
+ return false;
switch (ctxt->modrm) {
case 0xd8: /* VMRUN */
- return SVM_INSTR_VMRUN;
+ exit_code = SVM_EXIT_VMRUN;
+ break;
case 0xda: /* VMLOAD */
- return SVM_INSTR_VMLOAD;
+ exit_code = SVM_EXIT_VMLOAD;
+ break;
case 0xdb: /* VMSAVE */
- return SVM_INSTR_VMSAVE;
- default:
+ exit_code = SVM_EXIT_VMSAVE;
break;
+ default:
+ return false;
}
- return NONE_SVM_INSTR;
-}
-
-static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
-{
- const int guest_mode_exit_codes[] = {
- [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
- [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
- [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
- };
- int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
- [SVM_INSTR_VMRUN] = vmrun_interception,
- [SVM_INSTR_VMLOAD] = vmload_interception,
- [SVM_INSTR_VMSAVE] = vmsave_interception,
- };
- struct vcpu_svm *svm = to_svm(vcpu);
+ /*
+ * #GP due to CPL != 0 takes precedence over intercepts, but intercepts
+ * take precedence over #GP due to invalid RAX (which is checked by the
+ * exit handlers).
+ */
+ *ret = 1;
+ if (to_svm(vcpu)->vmcb->save.cpl)
+ kvm_inject_gp(vcpu, 0);
+ else if (is_guest_mode(vcpu) && vmcb12_is_intercept(&svm->nested.ctl, exit_code))
+ nested_svm_simple_vmexit(svm, exit_code);
+ else
+ *ret = svm_invoke_exit_handler(vcpu, exit_code);
- if (is_guest_mode(vcpu)) {
- nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
- return 1;
- }
- return svm_instr_handlers[opcode](vcpu);
+ return true;
}
/*
@@ -2297,7 +2285,7 @@ static int gp_interception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 error_code = svm->vmcb->control.exit_info_1;
- int opcode;
+ int r;
/* Both #GP cases have zero error_code */
if (error_code)
@@ -2307,9 +2295,8 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject;
- opcode = svm_instr_opcode(vcpu);
- if (opcode != NONE_SVM_INSTR)
- return emulate_svm_instr(vcpu, opcode);
+ if (check_emulate_svm_instr(vcpu, &r))
+ return r;
if (!enable_vmware_backdoor)
goto reinject;
---
The only thing I am unsure of is whether to check if it's an SVM
instruction in a separate helper to avoid the output parameter.
© 2016 - 2026 Red Hat, Inc.