Extract the computation of eventsel_hw from amd_pmu_set_msr() into a
separate helper function, amd_pmu_set_eventsel_hw().

No functional change intended.

Signed-off-by: Jim Mattson <jmattson@google.com>
---
arch/x86/kvm/svm/pmu.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
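
Note for readers (below the cut, so not part of the applied patch): the new
helper derives the hardware-programmed event selector from the guest's value
by clearing AMD64_EVENTSEL_HOSTONLY and forcing AMD64_EVENTSEL_GUESTONLY, so
the counter only counts while the guest is running. A minimal, standalone C
sketch of that bit manipulation follows; the macro values and the sample
event selector are restated locally for illustration (assumed to match the
architectural HostOnly/GuestOnly bits of PerfEvtSeln, bits 41 and 40) rather
than taken from kernel headers.

/*
 * Standalone illustration of the amd_pmu_set_eventsel_hw() computation.
 * The bit positions below are assumptions restated for this example
 * (HostOnly = bit 41, GuestOnly = bit 40 of the AMD PerfEvtSel MSR),
 * not copied from kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_EVENTSEL_GUESTONLY	(1ULL << 40)	/* count only in guest mode */
#define EXAMPLE_EVENTSEL_HOSTONLY	(1ULL << 41)	/* count only in host mode */

/* Mirrors the helper: drop HostOnly, force GuestOnly. */
static uint64_t eventsel_to_hw(uint64_t eventsel)
{
	return (eventsel & ~EXAMPLE_EVENTSEL_HOSTONLY) | EXAMPLE_EVENTSEL_GUESTONLY;
}

int main(void)
{
	/* An arbitrary event select value with HostOnly set by the guest. */
	uint64_t guest_eventsel = 0x76ULL | EXAMPLE_EVENTSEL_HOSTONLY;

	printf("guest eventsel: %#llx -> hw eventsel: %#llx\n",
	       (unsigned long long)guest_eventsel,
	       (unsigned long long)eventsel_to_hw(guest_eventsel));
	return 0;
}

Compiling and running this prints the selector before and after the
transformation, showing the HostOnly bit dropped and the GuestOnly bit set.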
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 7aa298eeb072..d9ca633f9f49 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -147,6 +147,12 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
+static void amd_pmu_set_eventsel_hw(struct kvm_pmc *pmc)
+{
+	pmc->eventsel_hw = (pmc->eventsel & ~AMD64_EVENTSEL_HOSTONLY) |
+			   AMD64_EVENTSEL_GUESTONLY;
+}
+
 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -166,8 +172,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		data &= ~pmu->reserved_bits;
 		if (data != pmc->eventsel) {
 			pmc->eventsel = data;
-			pmc->eventsel_hw = (data & ~AMD64_EVENTSEL_HOSTONLY) |
-					   AMD64_EVENTSEL_GUESTONLY;
+			amd_pmu_set_eventsel_hw(pmc);
 			kvm_pmu_request_counter_reprogram(pmc);
 		}
 		return 0;
--
2.53.0.rc1.225.gd81095ad13-goog