[PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs

Yosry Ahmed posted 26 patches 1 month ago
[PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs
Posted by Yosry Ahmed 1 month ago
MSR_IA32_DEBUGCTLMSR and LBR MSRs are currently not enumerated by
KVM_GET_MSR_INDEX_LIST, and LBR MSRs cannot be set with KVM_SET_MSRS. So
save/restore is completely broken.

Fix it by adding the MSRs to msrs_to_save_base, and allowing writes to
LBR MSRs from userspace only (as they are read-only MSRs). Additionally,
to correctly restore L1's LBRs while L2 is running, make sure the LBRs
are copied from the captured VMCB01 save area in svm_copy_vmrun_state().

For VMX, this also adds save/restore handling of KVM_GET_MSR_INDEX_LIST.
For unsupported MSR_IA32_LAST* MSRs, kvm_do_msr_access() should 0 these
MSRs on userspace reads, and ignore KVM_MSR_RET_UNSUPPORTED on userspace
writes.

Fixes: 24e09cbf480a ("KVM: SVM: enable LBR virtualization")
Cc: stable@vger.kernel.org
Reported-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry@kernel.org>
---
 arch/x86/kvm/svm/nested.c |  5 +++++
 arch/x86/kvm/svm/svm.c    | 24 ++++++++++++++++++++++++
 arch/x86/kvm/x86.c        |  3 +++
 3 files changed, 32 insertions(+)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index f7d5db0af69ac..3bf758c9cb85c 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1100,6 +1100,11 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
 		to_save->isst_addr = from_save->isst_addr;
 		to_save->ssp = from_save->ssp;
 	}
+
+	if (lbrv) {
+		svm_copy_lbrs(to_save, from_save);
+		to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
+	}
 }
 
 void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f52e588317fcf..cb53174583a26 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3071,6 +3071,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
 		svm_update_lbrv(vcpu);
 		break;
+	case MSR_IA32_LASTBRANCHFROMIP:
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.br_from = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTBRANCHTOIP:
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.br_to = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTINTFROMIP:
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.last_excp_from = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTINTTOIP:
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.last_excp_to = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
 	case MSR_VM_HSAVE_PA:
 		/*
 		 * Old kernels did not validate the value written to
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index db3f393192d94..416899b5dbe4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -351,6 +351,9 @@ static const u32 msrs_to_save_base[] = {
 	MSR_IA32_U_CET, MSR_IA32_S_CET,
 	MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP,
 	MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB,
+	MSR_IA32_DEBUGCTLMSR,
+	MSR_IA32_LASTBRANCHFROMIP, MSR_IA32_LASTBRANCHTOIP,
+	MSR_IA32_LASTINTFROMIP, MSR_IA32_LASTINTTOIP,
 };
 
 static const u32 msrs_to_save_pmu[] = {
-- 
2.53.0.473.g4a7958ca14-goog
Re: [PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs
Posted by Sean Christopherson 4 weeks, 1 day ago
On Tue, Mar 03, 2026, Yosry Ahmed wrote:
> MSR_IA32_DEBUGCTLMSR and LBR MSRs are currently not enumerated by
> KVM_GET_MSR_INDEX_LIST, and LBR MSRs cannot be set with KVM_SET_MSRS. So
> save/restore is completely broken.
> 
> Fix it by adding the MSRs to msrs_to_save_base, and allowing writes to
> LBR MSRs from userspace only (as they are read-only MSRs). Additionally,
> to correctly restore L1's LBRs while L2 is running, make sure the LBRs
> are copied from the captured VMCB01 save area in svm_copy_vmrun_state().
> 
> For VMX, this also adds save/restore handling of KVM_GET_MSR_INDEX_LIST.
> For unspported MSR_IA32_LAST* MSRs, kvm_do_msr_access() should 0 these
> MSRs on userspace reads, and ignore KVM_MSR_RET_UNSUPPORTED on userspace
> writes.
> 
> Fixes: 24e09cbf480a ("KVM: SVM: enable LBR virtualization")
> Cc: stable@vger.kernel.org
> Reported-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Yosry Ahmed <yosry@kernel.org>
> ---
>  arch/x86/kvm/svm/nested.c |  5 +++++
>  arch/x86/kvm/svm/svm.c    | 24 ++++++++++++++++++++++++
>  arch/x86/kvm/x86.c        |  3 +++
>  3 files changed, 32 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index f7d5db0af69ac..3bf758c9cb85c 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1100,6 +1100,11 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
>  		to_save->isst_addr = from_save->isst_addr;
>  		to_save->ssp = from_save->ssp;
>  	}
> +
> +	if (lbrv) {

Tomato, tomato, but maybe make this

	if (kvm_cpu_cap_has(X86_FEATURE_LBRV)) {

to capture that this requires nested support.  I can't imagine we'll ever disable
X86_FEATURE_LBRV when nested=1 && lbrv=1, but I don't see any harm in being
paranoid in this case.

> +		svm_copy_lbrs(to_save, from_save);
> +		to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
> +	}
>  }
>  
>  void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index f52e588317fcf..cb53174583a26 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3071,6 +3071,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
>  		svm_update_lbrv(vcpu);
>  		break;
> +	case MSR_IA32_LASTBRANCHFROMIP:

Shouldn't these be gated on lbrv?  If LBRV is truly unsupported, KVM would be
writing "undefined" fields and clearing "unknown" clean bits.

Specifically, if we do:

		if (!lbrv)
			return KVM_MSR_RET_UNSUPPORTED;

then kvm_do_msr_access() will allow writes of '0' from the host, via this code:

	if (host_initiated && !*data && kvm_is_advertised_msr(msr))
		return 0;

And then in the read side, do e.g.:

	msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;

to ensure KVM won't feed userspace garbage (the VMCB fields should be '0', but
there's no reason to risk that).

The changelog also needs to call out that kvm_set_msr_common() returns
KVM_MSR_RET_UNSUPPORTED for unhandled MSRs (i.e. for VMX and TDX), and that
kvm_get_msr_common() explicitly zeros the data for MSR_IA32_LASTxxx (because per
b5e2fec0ebc3 ("KVM: Ignore DEBUGCTL MSRs with no effect"), old and crusty kernels
would read the MSRs on Intel...).

So all in all (not yet tested), this?  If this is the only issue in the series,
or at least in the stable@ part of the series, no need for a v8 (I've obviously
already done the fixup).

--
From: Yosry Ahmed <yosry@kernel.org>
Date: Tue, 3 Mar 2026 00:33:57 +0000
Subject: [PATCH] KVM: SVM: Add missing save/restore handling of LBR MSRs

MSR_IA32_DEBUGCTLMSR and LBR MSRs are currently not enumerated by
KVM_GET_MSR_INDEX_LIST, and LBR MSRs cannot be set with KVM_SET_MSRS. So
save/restore is completely broken.

Fix it by adding the MSRs to msrs_to_save_base, and allowing writes to
LBR MSRs from userspace only (as they are read-only MSRs) if LBR
virtualization is enabled.  Additionally, to correctly restore L1's LBRs
while L2 is running, make sure the LBRs are copied from the captured
VMCB01 save area in svm_copy_vmrun_state().

Note, for VMX, this also fixes a flaw where MSR_IA32_DEBUGCTLMSR isn't
reported as an MSR to save/restore.

Note #2, over-reporting MSR_IA32_LASTxxx on Intel is ok, as KVM already
handles unsupported reads and writes thanks to commit b5e2fec0ebc3 ("KVM:
Ignore DEBUGCTL MSRs with no effect") (kvm_do_msr_access() will morph the
unsupported userspace write into a nop).

Fixes: 24e09cbf480a ("KVM: SVM: enable LBR virtualization")
Cc: stable@vger.kernel.org
Reported-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry@kernel.org>
Link: https://patch.msgid.link/20260303003421.2185681-4-yosry@kernel.org
[sean: guard with lbrv checks, massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/nested.c |  5 +++++
 arch/x86/kvm/svm/svm.c    | 44 +++++++++++++++++++++++++++++++++------
 arch/x86/kvm/x86.c        |  3 +++
 3 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d0faa3e2dc97..d142761ad517 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1098,6 +1098,11 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
 		to_save->isst_addr = from_save->isst_addr;
 		to_save->ssp = from_save->ssp;
 	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_LBRV)) {
+		svm_copy_lbrs(to_save, from_save);
+		to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
+	}
 }
 
 void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4649cef966f6..317c8c28443a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2788,19 +2788,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = svm->tsc_aux;
 		break;
 	case MSR_IA32_DEBUGCTLMSR:
-		msr_info->data = svm->vmcb->save.dbgctl;
+		msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;
 		break;
 	case MSR_IA32_LASTBRANCHFROMIP:
-		msr_info->data = svm->vmcb->save.br_from;
+		msr_info->data = lbrv ? svm->vmcb->save.br_from : 0;
 		break;
 	case MSR_IA32_LASTBRANCHTOIP:
-		msr_info->data = svm->vmcb->save.br_to;
+		msr_info->data = lbrv ? svm->vmcb->save.br_to : 0;
 		break;
 	case MSR_IA32_LASTINTFROMIP:
-		msr_info->data = svm->vmcb->save.last_excp_from;
-		break;
+		msr_info->data = lbrv ? svm->vmcb->save.last_excp_from : 0;
+		break;
 	case MSR_IA32_LASTINTTOIP:
-		msr_info->data = svm->vmcb->save.last_excp_to;
+		msr_info->data = lbrv ? svm->vmcb->save.last_excp_to : 0;
 		break;
 	case MSR_VM_HSAVE_PA:
 		msr_info->data = svm->nested.hsave_msr;
@@ -3075,6 +3075,38 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
 		svm_update_lbrv(vcpu);
 		break;
+	case MSR_IA32_LASTBRANCHFROMIP:
+		if (!lbrv)
+			return KVM_MSR_RET_UNSUPPORTED;
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.br_from = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTBRANCHTOIP:
+		if (!lbrv)
+			return KVM_MSR_RET_UNSUPPORTED;
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.br_to = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTINTFROMIP:
+		if (!lbrv)
+			return KVM_MSR_RET_UNSUPPORTED;
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.last_excp_from = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
+	case MSR_IA32_LASTINTTOIP:
+		if (!lbrv)
+			return KVM_MSR_RET_UNSUPPORTED;
+		if (!msr->host_initiated)
+			return 1;
+		svm->vmcb->save.last_excp_to = data;
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+		break;
 	case MSR_VM_HSAVE_PA:
 		/*
 		 * Old kernels did not validate the value written to
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6e87ec52fa06..64da02d1ee00 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -351,6 +351,9 @@ static const u32 msrs_to_save_base[] = {
 	MSR_IA32_U_CET, MSR_IA32_S_CET,
 	MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP,
 	MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB,
+	MSR_IA32_DEBUGCTLMSR,
+	MSR_IA32_LASTBRANCHFROMIP, MSR_IA32_LASTBRANCHTOIP,
+	MSR_IA32_LASTINTFROMIP, MSR_IA32_LASTINTTOIP,
 };
 
 static const u32 msrs_to_save_pmu[] = {

base-commit: 149b996ea367eef39faf82ccba0659a5f3d389ea
--
Re: [PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs
Posted by Yosry Ahmed 4 weeks, 1 day ago
> > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> > index f7d5db0af69ac..3bf758c9cb85c 100644
> > --- a/arch/x86/kvm/svm/nested.c
> > +++ b/arch/x86/kvm/svm/nested.c
> > @@ -1100,6 +1100,11 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
> >               to_save->isst_addr = from_save->isst_addr;
> >               to_save->ssp = from_save->ssp;
> >       }
> > +
> > +     if (lbrv) {
>
> Tomato, tomato, but maybe make this
>
>         if (kvm_cpu_cap_has(X86_FEATURE_LBRV)) {
>
> to capture that this requires nested support.  I can't imagine we'll ever disable
> X86_FEATURE_LBRV when nested=1 && lbrv=1, but I don't see any harm in being
> paranoid in this case.

Sounds good.

>
> > +             svm_copy_lbrs(to_save, from_save);
> > +             to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
> > +     }
> >  }
> >
> >  void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index f52e588317fcf..cb53174583a26 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -3071,6 +3071,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> >               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> >               svm_update_lbrv(vcpu);
> >               break;
> > +     case MSR_IA32_LASTBRANCHFROMIP:
>
> Shouldn't these be gated on lbrv?  If LBRV is truly unsupported, KVM would be
> writing "undefined" fields and clearing "unknown" clean bits.
>
> Specifically, if we do:
>
>                 if (!lbrv)
>                         return KVM_MSR_RET_UNSUPPORTED;
>
> then kvm_do_msr_access() will allow writes of '0' from the host, via this code:
>
>         if (host_initiated && !*data && kvm_is_advertised_msr(msr))
>                 return 0;
>
> And then in the read side, do e.g.:
>
>         msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;
>
> to ensure KVM won't feed userspace garbage (the VMCB fields should be '0', but
> there's no reason to risk that).

Good call.

>
> The changelog also needs to call out that kvm_set_msr_common() returns
> KVM_MSR_RET_UNSUPPORTED for unhandled MSRs (i.e. for VMX and TDX), and that
> kvm_get_msr_common() explicitly zeros the data for MSR_IA32_LASTxxx (because per
> b5e2fec0ebc3 ("KVM: Ignore DEBUGCTL MSRs with no effect"), old and crust kernels
> would read the MSRs on Intel...).

That was captured (somehow):

For VMX, this also adds save/restore handling of KVM_GET_MSR_INDEX_LIST.
For unspported MSR_IA32_LAST* MSRs, kvm_do_msr_access() should 0 these
MSRs on userspace reads, and ignore KVM_MSR_RET_UNSUPPORTED on userspace
writes.

>
> So all in all (not yet tested), this?  If this is the only issue in the series,
> or at least in the stable@ part of the series, no need for a v8 (I've obviously
> already done the fixup).

Looks good with a minor nit below (could be a followup).

> @@ -3075,6 +3075,38 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>                 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
>                 svm_update_lbrv(vcpu);
>                 break;
> +       case MSR_IA32_LASTBRANCHFROMIP:
> +               if (!lbrv)
> +                       return KVM_MSR_RET_UNSUPPORTED;
> +               if (!msr->host_initiated)
> +                       return 1;
> +               svm->vmcb->save.br_from = data;
> +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> +               break;
> +       case MSR_IA32_LASTBRANCHTOIP:
> +               if (!lbrv)
> +                       return KVM_MSR_RET_UNSUPPORTED;
> +               if (!msr->host_initiated)
> +                       return 1;
> +               svm->vmcb->save.br_to = data;
> +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> +               break;
> +       case MSR_IA32_LASTINTFROMIP:
> +               if (!lbrv)
> +                       return KVM_MSR_RET_UNSUPPORTED;
> +               if (!msr->host_initiated)
> +                       return 1;
> +               svm->vmcb->save.last_excp_from = data;
> +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> +               break;
> +       case MSR_IA32_LASTINTTOIP:
> +               if (!lbrv)
> +                       return KVM_MSR_RET_UNSUPPORTED;
> +               if (!msr->host_initiated)
> +                       return 1;
> +               svm->vmcb->save.last_excp_to = data;
> +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> +               break;

There's so much repeated code here. We can use gotos to share code,
but I am not sure if that's a strict improvement. We can also use a
helper, perhaps?

static int svm_set_lbr_msr(struct vcpu_svm *svm, struct msr_data *msr,
u64 data, u64 *field)
{
       if (!lbrv)
               return KVM_MSR_RET_UNSUPPORTED;
       if (!msr->host_initiated)
               return 1;
       *field = data;
        vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
        return 0;
}

...

       case MSR_IA32_LASTBRANCHFROMIP:
             ret = svm_set_lbr_msr(svm, msr, data, &svm->vmcb->save.br_from);
             if (ret)
                        return ret;
              break;
...
Re: [PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs
Posted by Sean Christopherson 4 weeks, 1 day ago
On Tue, Mar 03, 2026, Yosry Ahmed wrote:
> > > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> > So all in all (not yet tested), this?  If this is the only issue in the series,
> > or at least in the stable@ part of the series, no need for a v8 (I've obviously
> > already done the fixup).
> 
> Looks good with a minor nit below (could be a followup).
> 
> > @@ -3075,6 +3075,38 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> >                 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> >                 svm_update_lbrv(vcpu);
> >                 break;
> > +       case MSR_IA32_LASTBRANCHFROMIP:
> > +               if (!lbrv)
> > +                       return KVM_MSR_RET_UNSUPPORTED;
> > +               if (!msr->host_initiated)
> > +                       return 1;
> > +               svm->vmcb->save.br_from = data;
> > +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> > +               break;
> > +       case MSR_IA32_LASTBRANCHTOIP:
> > +               if (!lbrv)
> > +                       return KVM_MSR_RET_UNSUPPORTED;
> > +               if (!msr->host_initiated)
> > +                       return 1;
> > +               svm->vmcb->save.br_to = data;
> > +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> > +               break;
> > +       case MSR_IA32_LASTINTFROMIP:
> > +               if (!lbrv)
> > +                       return KVM_MSR_RET_UNSUPPORTED;
> > +               if (!msr->host_initiated)
> > +                       return 1;
> > +               svm->vmcb->save.last_excp_from = data;
> > +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> > +               break;
> > +       case MSR_IA32_LASTINTTOIP:
> > +               if (!lbrv)
> > +                       return KVM_MSR_RET_UNSUPPORTED;
> > +               if (!msr->host_initiated)
> > +                       return 1;
> > +               svm->vmcb->save.last_excp_to = data;
> > +               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
> > +               break;
> 
> There's so much repeated code here. 

Ya :-(

> We can use gotos to share code, but I am not sure if that's a strict
> improvement. We can also use a helper, perhaps?


Where's your sense of adventure?

	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
		if (!lbrv)
			return KVM_MSR_RET_UNSUPPORTED;
		if (!msr->host_initiated)
			return 1;
		*(&svm->vmcb->save.br_from + (ecx - MSR_IA32_LASTBRANCHFROMIP)) = data;
		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
		break;

Jokes aside, maybe this, to dedup get() at the same time?

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 68b747a94294..f1811105e89f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2720,6 +2720,23 @@ static int svm_get_feature_msr(u32 msr, u64 *data)
        return 0;
 }
 
+static __always_inline u64 *svm_vmcb_lbr(struct vcpu_svm *svm, u32 msr)
+{
+       switch (msr) {
+       case MSR_IA32_LASTBRANCHFROMIP:
+               return &svm->vmcb->save.br_from;
+       case MSR_IA32_LASTBRANCHTOIP:
+               return &svm->vmcb->save.br_to;
+       case MSR_IA32_LASTINTFROMIP:
+               return &svm->vmcb->save.last_excp_from;
+       case MSR_IA32_LASTINTTOIP:
+               return &svm->vmcb->save.last_excp_to;
+       default:
+               break;
+       }
+       BUILD_BUG();
+}
+
 static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
                                      struct msr_data *msr_info)
 {
@@ -2838,16 +2855,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;
                break;
        case MSR_IA32_LASTBRANCHFROMIP:
-               msr_info->data = lbrv ? svm->vmcb->save.br_from : 0;
-               break;
        case MSR_IA32_LASTBRANCHTOIP:
-               msr_info->data = lbrv ? svm->vmcb->save.br_to : 0;
-               break;
        case MSR_IA32_LASTINTFROMIP:
-               msr_info->data = lbrv ? svm->vmcb->save.last_excp_from : 0;
-               break;
        case MSR_IA32_LASTINTTOIP:
-               msr_info->data = lbrv ? svm->vmcb->save.last_excp_to : 0;
+               msr_info->data = lbrv ? *svm_vmcb_lbr(svm, msr_info->index) : 0;
                break;
        case MSR_VM_HSAVE_PA:
                msr_info->data = svm->nested.hsave_msr;
@@ -3122,35 +3133,14 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                svm_update_lbrv(vcpu);
                break;
        case MSR_IA32_LASTBRANCHFROMIP:
-               if (!lbrv)
-                       return KVM_MSR_RET_UNSUPPORTED;
-               if (!msr->host_initiated)
-                       return 1;
-               svm->vmcb->save.br_from = data;
-               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
-               break;
        case MSR_IA32_LASTBRANCHTOIP:
-               if (!lbrv)
-                       return KVM_MSR_RET_UNSUPPORTED;
-               if (!msr->host_initiated)
-                       return 1;
-               svm->vmcb->save.br_to = data;
-               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
-               break;
        case MSR_IA32_LASTINTFROMIP:
-               if (!lbrv)
-                       return KVM_MSR_RET_UNSUPPORTED;
-               if (!msr->host_initiated)
-                       return 1;
-               svm->vmcb->save.last_excp_from = data;
-               vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
-               break;
        case MSR_IA32_LASTINTTOIP:
                if (!lbrv)
                        return KVM_MSR_RET_UNSUPPORTED;
                if (!msr->host_initiated)
                        return 1;
-               svm->vmcb->save.last_excp_to = data;
+               *svm_vmcb_lbr(svm, ecx) = data;
                vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
                break;
        case MSR_VM_HSAVE_PA:
Re: [PATCH v7 03/26] KVM: SVM: Add missing save/restore handling of LBR MSRs
Posted by Yosry Ahmed 4 weeks, 1 day ago
> > There's so much repeated code here.
>
> Ya :-(
>
> > We can use gotos to share code, but I am not sure if that's a strict
> > improvement. We can also use a helper, perhaps?
>
>
> Where's your sense of adventure?
>
>         case MSR_IA32_LASTBRANCHFROMIP:
>         case MSR_IA32_LASTBRANCHTOIP:
>         case MSR_IA32_LASTINTFROMIP:
>         case MSR_IA32_LASTINTTOIP:
>                 if (!lbrv)
>                         return KVM_MSR_RET_UNSUPPORTED;
>                 if (!msr->host_initiated)
>                         return 1;
>                 *(&svm->vmcb->save.br_from + (ecx - MSR_IA32_LASTBRANCHFROMIP)) = data;
>                 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
>                 break;
>
> Jokes aside, maybe this, to dedup get() at the same time?

Looks good to me!