All accesses to the vmcb12 in guest memory on nested VMRUN are
limited to nested_svm_vmrun(): copying vmcb12 fields into the cache,
and writing error state back to vmcb12 on failed consistency checks.
However, vmcb12 remains mapped throughout nested_svm_vmrun(). Mapping
and unmapping around each usage is possible, but that makes it easy to
introduce bugs where 'vmcb12' is used after being unmapped.
Move reading the vmcb12, copying it into the cache, and the consistency
checks from nested_svm_vmrun() into a new helper,
nested_svm_copy_vmcb12_to_cache(), to limit the scope of the mapping.
Signed-off-by: Yosry Ahmed <yosry@kernel.org>
---
arch/x86/kvm/svm/nested.c | 89 ++++++++++++++++++++++-----------------
1 file changed, 51 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index f89040a467d4a..0151354b2ef01 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1054,12 +1054,39 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, bool from_vmrun)
return 0;
}
-int nested_svm_vmrun(struct kvm_vcpu *vcpu)
+static int nested_svm_copy_vmcb12_to_cache(struct kvm_vcpu *vcpu, u64 vmcb12_gpa)
{
struct vcpu_svm *svm = to_svm(vcpu);
- int ret;
- struct vmcb *vmcb12;
struct kvm_host_map map;
+ struct vmcb *vmcb12;
+ int r = 0;
+
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
+ return -EFAULT;
+
+ vmcb12 = map.hva;
+ nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+ nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
+
+ if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
+ !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
+ vmcb12->control.exit_code = SVM_EXIT_ERR;
+ vmcb12->control.exit_info_1 = 0;
+ vmcb12->control.exit_info_2 = 0;
+ vmcb12->control.event_inj = 0;
+ vmcb12->control.event_inj_err = 0;
+ svm_set_gif(svm, false);
+ r = -EINVAL;
+ }
+
+ kvm_vcpu_unmap(vcpu, &map);
+ return r;
+}
+
+int nested_svm_vmrun(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int ret, err;
u64 vmcb12_gpa;
struct vmcb *vmcb01 = svm->vmcb01.ptr;
@@ -1080,32 +1107,23 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
return ret;
}
+ if (WARN_ON_ONCE(!svm->nested.initialized))
+ return -EINVAL;
+
vmcb12_gpa = svm->vmcb->save.rax;
- if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) {
+ err = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
+ if (err == -EFAULT) {
kvm_inject_gp(vcpu, 0);
return 1;
}
+ /*
+ * Advance RIP if #GP or #UD are not injected, but otherwise stop if
+ * copying and checking vmcb12 failed.
+ */
ret = kvm_skip_emulated_instruction(vcpu);
-
- vmcb12 = map.hva;
-
- if (WARN_ON_ONCE(!svm->nested.initialized))
- return -EINVAL;
-
- nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
- nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
-
- if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
- !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
- vmcb12->control.exit_code = SVM_EXIT_ERR;
- vmcb12->control.exit_info_1 = 0;
- vmcb12->control.exit_info_2 = 0;
- vmcb12->control.event_inj = 0;
- vmcb12->control.event_inj_err = 0;
- svm_set_gif(svm, false);
- goto out;
- }
+ if (err)
+ return ret;
/*
* Since vmcb01 is not in use, we can use it to store some of the L1
@@ -1122,23 +1140,18 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
svm->nested.nested_run_pending = 1;
- if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true))
- goto out_exit_err;
-
- if (nested_svm_merge_msrpm(vcpu))
- goto out;
-
-out_exit_err:
- svm->nested.nested_run_pending = 0;
-
- svm->vmcb->control.exit_code = SVM_EXIT_ERR;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
+ if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true) ||
+ !nested_svm_merge_msrpm(vcpu)) {
+ svm->nested.nested_run_pending = 0;
+ svm->nmi_l1_to_l2 = false;
+ svm->soft_int_injected = false;
- nested_svm_vmexit(svm);
+ svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
-out:
- kvm_vcpu_unmap(vcpu, &map);
+ nested_svm_vmexit(svm);
+ }
return ret;
}
--
2.53.0.473.g4a7958ca14-goog