Since many guests will never touch the PMU, they need not pay the cost
of context swapping the PMU registers.

Use an enum to implement a simple state machine for PMU register
access: the PMU is either free or guest-owned, and the registers only
need to be context swapped when they are guest-owned. The PMU starts
out free and transitions to guest-owned the first time a guest touches
the PMU registers.
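
Schematically, the resulting state machine looks like this
(illustrative sketch only; the actual code is in the diff below):

  enum vcpu_pmu_register_access {
          VCPU_PMU_ACCESS_FREE,        /* guest has never touched the PMU */
          VCPU_PMU_ACCESS_GUEST_OWNED, /* guest PMU state must be swapped */
  };

  /* First trapped PMU access: FREE -> GUEST_OWNED (one-way transition) */
  if (kvm_vcpu_pmu_is_partitioned(vcpu) &&
      vcpu->arch.pmu.access == VCPU_PMU_ACCESS_FREE)
          vcpu->arch.pmu.access = VCPU_PMU_ACCESS_GUEST_OWNED;

  /* kvm_pmu_load()/kvm_pmu_put(): skip the swap unless guest-owned */
  if (vcpu->arch.pmu.access != VCPU_PMU_ACCESS_GUEST_OWNED)
          return;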
Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
arch/arm64/include/asm/kvm_host.h | 1 +
arch/arm64/include/asm/kvm_types.h | 6 +++++-
arch/arm64/kvm/debug.c | 2 +-
arch/arm64/kvm/hyp/vhe/switch.c | 2 ++
arch/arm64/kvm/pmu-direct.c | 26 ++++++++++++++++++++++++--
include/kvm/arm_pmu.h | 5 +++++
6 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8e09865490a9f..41577ede0254f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1377,6 +1377,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
+void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu);
void kvm_init_host_debug_data(void);
void kvm_debug_init_vhe(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
index 9a126b9e2d7c9..4e39cbc80aa0b 100644
--- a/arch/arm64/include/asm/kvm_types.h
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -4,5 +4,9 @@
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
-#endif /* _ASM_ARM64_KVM_TYPES_H */
+enum vcpu_pmu_register_access {
+ VCPU_PMU_ACCESS_FREE,
+ VCPU_PMU_ACCESS_GUEST_OWNED,
+};
+#endif /* _ASM_ARM64_KVM_TYPES_H */
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 0ab89c91e19cb..c2cf6b308ec60 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -34,7 +34,7 @@ static int cpu_has_spe(u64 dfr0)
* - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
* - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
*/
-static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
+void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
int hpmn = kvm_pmu_hpmn(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 154da70146d98..b374308e786d7 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -524,6 +524,8 @@ static bool kvm_hyp_handle_pmu_regs(struct kvm_vcpu *vcpu)
val = vcpu_get_reg(vcpu, rt);
nr_cnt = vcpu->kvm->arch.nr_pmu_counters;
+ kvm_pmu_set_physical_access(vcpu);
+
switch (sysreg) {
case SYS_PMCR_EL0:
mask = ARMV8_PMU_PMCR_MASK;
diff --git a/arch/arm64/kvm/pmu-direct.c b/arch/arm64/kvm/pmu-direct.c
index 4bcacc55c507f..11fae54cd6534 100644
--- a/arch/arm64/kvm/pmu-direct.c
+++ b/arch/arm64/kvm/pmu-direct.c
@@ -72,10 +72,30 @@ bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
u8 hpmn = vcpu->kvm->arch.nr_pmu_counters;
return kvm_vcpu_pmu_is_partitioned(vcpu) &&
+ vcpu->arch.pmu.access == VCPU_PMU_ACCESS_GUEST_OWNED &&
cpus_have_final_cap(ARM64_HAS_FGT) &&
(hpmn != 0 || cpus_have_final_cap(ARM64_HAS_HPMN0));
}
+/**
+ * kvm_pmu_set_physical_access() - Grant the guest physical PMU register access
+ * @vcpu: Pointer to vcpu struct
+ *
+ * Reconfigure the guest for physical access to the PMU hardware if
+ * allowed. This means marking the PMU registers guest-owned and
+ * reconfiguring mdcr_el2 so subsequent loads context swap the guest
+ * PMU state onto hardware.
+ */
+void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_pmu_is_partitioned(vcpu) &&
+     vcpu->arch.pmu.access == VCPU_PMU_ACCESS_FREE) {
+ vcpu->arch.pmu.access = VCPU_PMU_ACCESS_GUEST_OWNED;
+ kvm_arm_setup_mdcr_el2(vcpu);
+ }
+}
+
/**
* kvm_pmu_host_counter_mask() - Compute bitmask of host-reserved counters
* @pmu: Pointer to arm_pmu struct
@@ -232,7 +252,8 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
* If we aren't guest-owned then we know the guest isn't using
* the PMU anyway, so no need to bother with the swap.
*/
- if (!kvm_vcpu_pmu_is_partitioned(vcpu))
+ if (!kvm_vcpu_pmu_is_partitioned(vcpu) ||
+ vcpu->arch.pmu.access != VCPU_PMU_ACCESS_GUEST_OWNED)
return;
preempt_disable();
@@ -302,7 +323,8 @@ void kvm_pmu_put(struct kvm_vcpu *vcpu)
* accessing the PMU anyway, so no need to bother with the
* swap.
*/
- if (!kvm_vcpu_pmu_is_partitioned(vcpu))
+ if (!kvm_vcpu_pmu_is_partitioned(vcpu) ||
+ vcpu->arch.pmu.access != VCPU_PMU_ACCESS_GUEST_OWNED)
return;
preempt_disable();
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 93ccda941aa46..82665d54258df 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -7,6 +7,7 @@
#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H
+#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
#include <linux/perf/arm_pmu.h>
@@ -40,6 +41,7 @@ struct kvm_pmu {
int irq_num;
bool created;
bool irq_level;
+ enum vcpu_pmu_register_access access;
};
struct arm_pmu_entry {
@@ -103,6 +105,8 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu);
void kvm_pmu_load(struct kvm_vcpu *vcpu);
void kvm_pmu_put(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu);
+
#if !defined(__KVM_NVHE_HYPERVISOR__)
bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu);
@@ -177,6 +181,7 @@ static inline u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
}
static inline void kvm_pmu_load(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_put(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
--
2.53.0.rc2.204.g2597b5adb4-goog