[PATCH v4 2/9] KVM: SVM: Refactor SVM instruction handling on #GP intercept

Yosry Ahmed posted 9 patches 3 weeks ago
[PATCH v4 2/9] KVM: SVM: Refactor SVM instruction handling on #GP intercept
Posted by Yosry Ahmed 3 weeks ago
Instead of returning an opcode from svm_instr_opcode() and then passing
it to emulate_svm_instr(), which uses it to find the corresponding exit
code and intercept handler, return the exit code directly from
svm_instr_opcode(), and rename it to svm_instr_exit_code().

emulate_svm_instr() boils down to synthesizing a #VMEXIT or calling the
intercept handler, so open-code it in gp_interception(), and use
svm_invoke_exit_handler() to call the intercept handler based on
the exit code. This allows for dropping the SVM_INSTR_* enum, and the
const array mapping its values to exit codes and intercept handlers.

In gp_interception(), handle SVM instructions first with an early return,
un-indenting the rest of the code.

No functional change intended.

Signed-off-by: Yosry Ahmed <yosry@kernel.org>
---
 arch/x86/kvm/svm/svm.c | 78 +++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 51 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d2ca226871c2f..392a5088f20bf 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2233,54 +2233,26 @@ static int vmrun_interception(struct kvm_vcpu *vcpu)
 	return nested_svm_vmrun(vcpu);
 }
 
-enum {
-	NONE_SVM_INSTR,
-	SVM_INSTR_VMRUN,
-	SVM_INSTR_VMLOAD,
-	SVM_INSTR_VMSAVE,
-};
-
-/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
-static int svm_instr_opcode(struct kvm_vcpu *vcpu)
+/* Return 0 if not SVM instr, otherwise return associated exit_code */
+static u64 svm_instr_exit_code(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
 
 	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
-		return NONE_SVM_INSTR;
+		return 0;
 
 	switch (ctxt->modrm) {
 	case 0xd8: /* VMRUN */
-		return SVM_INSTR_VMRUN;
+		return SVM_EXIT_VMRUN;
 	case 0xda: /* VMLOAD */
-		return SVM_INSTR_VMLOAD;
+		return SVM_EXIT_VMLOAD;
 	case 0xdb: /* VMSAVE */
-		return SVM_INSTR_VMSAVE;
+		return SVM_EXIT_VMSAVE;
 	default:
 		break;
 	}
 
-	return NONE_SVM_INSTR;
-}
-
-static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
-{
-	const int guest_mode_exit_codes[] = {
-		[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
-		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
-		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
-	};
-	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
-		[SVM_INSTR_VMRUN] = vmrun_interception,
-		[SVM_INSTR_VMLOAD] = vmload_interception,
-		[SVM_INSTR_VMSAVE] = vmsave_interception,
-	};
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	if (is_guest_mode(vcpu)) {
-		nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
-		return 1;
-	}
-	return svm_instr_handlers[opcode](vcpu);
+	return 0;
 }
 
 /*
@@ -2295,7 +2267,7 @@ static int gp_interception(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 error_code = svm->vmcb->control.exit_info_1;
-	int opcode;
+	u64 svm_exit_code;
 
 	/* Both #GP cases have zero error_code */
 	if (error_code)
@@ -2305,27 +2277,31 @@ static int gp_interception(struct kvm_vcpu *vcpu)
 	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
 		goto reinject;
 
-	opcode = svm_instr_opcode(vcpu);
-
-	if (opcode == NONE_SVM_INSTR) {
-		if (!enable_vmware_backdoor)
-			goto reinject;
-
-		/*
-		 * VMware backdoor emulation on #GP interception only handles
-		 * IN{S}, OUT{S}, and RDPMC.
-		 */
-		if (!is_guest_mode(vcpu))
-			return kvm_emulate_instruction(vcpu,
-				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
-	} else {
+	svm_exit_code = svm_instr_exit_code(vcpu);
+	if (svm_exit_code) {
 		/* All SVM instructions expect page aligned RAX */
 		if (svm->vmcb->save.rax & ~PAGE_MASK)
 			goto reinject;
 
-		return emulate_svm_instr(vcpu, opcode);
+		if (is_guest_mode(vcpu)) {
+			nested_svm_simple_vmexit(svm, svm_exit_code);
+			return 1;
+		}
+
+		return svm_invoke_exit_handler(vcpu, svm_exit_code);
 	}
 
+	if (!enable_vmware_backdoor)
+		goto reinject;
+
+	/*
+	 * VMware backdoor emulation on #GP interception only handles
+	 * IN{S}, OUT{S}, and RDPMC.
+	 */
+	if (!is_guest_mode(vcpu))
+		return kvm_emulate_instruction(vcpu,
+				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
+
 reinject:
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 	return 1;
-- 
2.53.0.851.ga537e3e6e9-goog
Re: [PATCH v4 2/9] KVM: SVM: Refactor SVM instruction handling on #GP intercept
Posted by Sean Christopherson 3 days, 8 hours ago
On Mon, Mar 16, 2026, Yosry Ahmed wrote:
> +/* Return 0 if not SVM instr, otherwise return associated exit_code */
> +static u64 svm_instr_exit_code(struct kvm_vcpu *vcpu)

To make it very clear what this is doing, how about:

static u64 svm_get_decoded_instr_exit_code(struct kvm_vcpu *vcpu)

>  {
>  	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
>  
>  	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
> -		return NONE_SVM_INSTR;
> +		return 0;

This should assert to ensure there's no collision with '0', i.e.

	BUILD_BUG_ON(!SVM_EXIT_VMRUN || !SVM_EXIT_VMLOAD || !SVM_EXIT_VMSAVE);


> +	if (!is_guest_mode(vcpu))
> +		return kvm_emulate_instruction(vcpu,
> +				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);

Since you're moving this anyways:

	if (!is_guest_mode(vcpu))
		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP |
						     EMULTYPE_NO_DECODE);


Actually!  Better idea, for this code and for the page_address_valid() checks:
invert the checks to reduce indentation, i.e. end up with:

	/* FIXME: Handle SVM instructions through the emulator */
	svm_exit_code = svm_get_decoded_instr_exit_code(vcpu);
	if (svm_exit_code) {
		if (!is_guest_mode(vcpu))
			return svm_invoke_exit_handler(vcpu, svm_exit_code);

		if (nested_svm_check_permissions(vcpu))
			return 1;

		if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
			goto reinject;

		/*
		 * FIXME: Only synthesize a #VMEXIT if L1 sets the intercept,
		 * but only after the VMLOAD/VMSAVE exit handlers can properly
		 * handle VMLOAD/VMSAVE from L2 with VLS enabled in L1 (i.e.
		 * RAX is an L2 GPA that needs translation through L1's NPT).
		 */
		nested_svm_simple_vmexit(svm, svm_exit_code);
		return 1;
	}

	/*
	 * VMware backdoor emulation on #GP interception only handles
	 * IN{S}, OUT{S}, and RDPMC, and only for L1.
	 */
	if (!enable_vmware_backdoor || is_guest_mode(vcpu))
		goto reinject;

	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);

> +
>  reinject:
>  	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
>  	return 1;
> -- 
> 2.53.0.851.ga537e3e6e9-goog
>
Re: [PATCH v4 2/9] KVM: SVM: Refactor SVM instruction handling on #GP intercept
Posted by Yosry Ahmed 3 days, 4 hours ago
On Fri, Apr 3, 2026 at 11:18 AM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Mar 16, 2026, Yosry Ahmed wrote:
> > +/* Return 0 if not SVM instr, otherwise return associated exit_code */
> > +static u64 svm_instr_exit_code(struct kvm_vcpu *vcpu)
>
> To make it very clear what this is doing how about:
>
> static u64 svm_get_decoded_instr_exit_code(struct kvm_vcpu *vcpu)

Yeah I like this better.

>
> >  {
> >       struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
> >
> >       if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
> > -             return NONE_SVM_INSTR;
> > +             return 0;
>
> This should assert to ensure there's no collision with '0', i.e.
>
>         BUILD_BUG_ON(!SVM_EXIT_VMRUN || !SVM_EXIT_VMLOAD || !SVM_EXIT_VMSAVE);

I was going to add this, but I don't think these values can change anyway.
It's good for readability though, so I am fine with it.

>
>
> > +     if (!is_guest_mode(vcpu))
> > +             return kvm_emulate_instruction(vcpu,
> > +                             EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
>
> Since you're moving this anyways:
>
>         if (!is_guest_mode(vcpu))
>                 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP |
>                                                      EMULTYPE_NO_DECODE);
>
>
> Actually!  Better idea, for this code and for the page_address_valid() checks:
> invert the checks to reduce indentation, i.e. end up with:
>
>         /* FIXME: Handle SVM instructions through the emulator */
>         svm_exit_code = svm_get_decoded_instr_exit_code(vcpu);
>         if (svm_exit_code) {
>                 if (!is_guest_mode(vcpu))
>                         return svm_invoke_exit_handler(vcpu, svm_exit_code);
>
>                 if (nested_svm_check_permissions(vcpu))
>                         return 1;
>
>                 if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
>                         goto reinject;
>
>                 /*
>                  * FIXME: Only synthesize a #VMEXIT if L1 sets the intercept,
>                  * but only after the VMLOAD/VMSAVE exit handlers can properly
>                  * handle VMLOAD/VMSAVE from L2 with VLS enabled in L1 (i.e.
>                  * RAX is an L2 GPA that needs translation through L1's NPT).
>                  */
>                 nested_svm_simple_vmexit(svm, svm_exit_code);
>                 return 1;
>         }
>
>         /*
>          * VMware backdoor emulation on #GP interception only handles
>          * IN{S}, OUT{S}, and RDPMC, and only for L1.
>          */
>         if (!enable_vmware_backdoor || is_guest_mode(vcpu))
>                 goto reinject;
>
>         return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
>
> > +
> >  reinject:
> >       kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
> >       return 1;


The end result looks good to me.